diff --git a/base/atomiccounter/atomiccounter.go b/base/atomiccounter/atomiccounter.go deleted file mode 100644 index 9846df1c74..0000000000 --- a/base/atomiccounter/atomiccounter.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2018, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package atomiccounter implements a basic atomic int64 counter. -package atomiccounter - -import ( - "sync/atomic" -) - -// Counter implements a basic atomic int64 counter. -type Counter int64 - -// Add adds to counter. -func (a *Counter) Add(inc int64) int64 { - return atomic.AddInt64((*int64)(a), inc) -} - -// Sub subtracts from counter. -func (a *Counter) Sub(dec int64) int64 { - return atomic.AddInt64((*int64)(a), -dec) -} - -// Inc increments by 1. -func (a *Counter) Inc() int64 { - return atomic.AddInt64((*int64)(a), 1) -} - -// Dec decrements by 1. -func (a *Counter) Dec() int64 { - return atomic.AddInt64((*int64)(a), -1) -} - -// Value returns the current value. -func (a *Counter) Value() int64 { - return atomic.LoadInt64((*int64)(a)) -} - -// Set sets the counter to a new value. -func (a *Counter) Set(val int64) { - atomic.StoreInt64((*int64)(a), val) -} - -// Swap swaps a new value in and returns the old value. -func (a *Counter) Swap(val int64) int64 { - return atomic.SwapInt64((*int64)(a), val) -} diff --git a/base/atomiccounter/atomiccounter_test.go b/base/atomiccounter/atomiccounter_test.go deleted file mode 100644 index 327700d2eb..0000000000 --- a/base/atomiccounter/atomiccounter_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package atomiccounter - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestCounter(t *testing.T) { - var c Counter - assert.Equal(t, int64(0), c.Value()) - assert.Equal(t, int64(1), c.Inc()) - assert.Equal(t, int64(1), c.Value()) - assert.Equal(t, int64(2), c.Add(1)) - assert.Equal(t, int64(2), c.Value()) - assert.Equal(t, int64(1), c.Sub(1)) - assert.Equal(t, int64(1), c.Value()) - assert.Equal(t, int64(0), c.Dec()) - assert.Equal(t, int64(0), c.Value()) - assert.Equal(t, int64(0), c.Swap(1)) - assert.Equal(t, int64(1), c.Value()) - c.Set(2) - assert.Equal(t, int64(2), c.Value()) -} diff --git a/base/exec/stdio.go b/base/exec/stdio.go index f871bd6284..3c04bebaa4 100644 --- a/base/exec/stdio.go +++ b/base/exec/stdio.go @@ -56,9 +56,13 @@ func (st *StdIO) Set(o *StdIO) *StdIO { func (st *StdIO) SetToOS() *StdIO { cur := &StdIO{} cur.SetFromOS() + if sif, ok := st.In.(*os.File); ok { + os.Stdin = sif + } else { + fmt.Printf("In is not an *os.File: %#v\n", st.In) + } os.Stdout = st.Out.(*os.File) os.Stderr = st.Err.(*os.File) - os.Stdin = st.In.(*os.File) return cur } @@ -98,13 +102,10 @@ func IsPipe(rw any) bool { if rw == nil { return false } - w, ok := rw.(io.Writer) + _, ok := rw.(io.Writer) if !ok { return false } - if w == os.Stdout { - return false - } of, ok := rw.(*os.File) if !ok { return false @@ -247,6 +248,9 @@ func (st *StdIOState) PopToStart() { for len(st.InStack) > st.InStart { st.PopIn() } + for len(st.PipeIn) > 0 { + CloseReader(st.PipeIn.Pop()) + } } // ErrIsInOut returns true if the given Err writer is also present diff --git a/base/exec/stdio_test.go b/base/exec/stdio_test.go index 3b8f041799..764ea1174d 100644 --- a/base/exec/stdio_test.go +++ b/base/exec/stdio_test.go @@ -18,14 +18,14 @@ func TestStdIO(t *testing.T) { assert.Equal(t, os.Stdout, st.Out) assert.Equal(t, os.Stderr, st.Err) assert.Equal(t, os.Stdin, st.In) - assert.Equal(t, false, st.OutIsPipe()) + // assert.Equal(t, false, st.OutIsPipe()) obuf := &bytes.Buffer{} ibuf := &bytes.Buffer{} var ss StdIOState ss.SetFromOS() ss.StackStart() - assert.Equal(t, false, ss.OutIsPipe()) + // assert.Equal(t, false, ss.OutIsPipe()) ss.PushOut(obuf) assert.NotEqual(t, os.Stdout, ss.Out) diff --git a/base/fileinfo/fileinfo.go b/base/fileinfo/fileinfo.go index 28caffa376..0ef6bf11f4 100644 --- a/base/fileinfo/fileinfo.go +++ b/base/fileinfo/fileinfo.go @@ -75,6 +75,10 @@ type FileInfo struct { //types:add // version control system status, when enabled VCS vcs.FileStatus `table:"-"` + // Generated indicates that the file is generated and should not be edited. + // For Go files, this regex: `^// Code generated .* DO NOT EDIT\.$` is used. 
+ Generated bool `table:"-"` + // full path to file, including name; for file functions Path string `table:"-"` } @@ -143,6 +147,7 @@ func (fi *FileInfo) SetMimeInfo() error { } fi.Cat = UnknownCategory fi.Known = Unknown + fi.Generated = IsGeneratedFile(fi.Path) fi.Kind = "" mtyp, _, err := MimeFromFile(fi.Path) if err != nil { diff --git a/base/fileinfo/mimetype.go b/base/fileinfo/mimetype.go index d44bcc819f..f0529b8916 100644 --- a/base/fileinfo/mimetype.go +++ b/base/fileinfo/mimetype.go @@ -7,7 +7,9 @@ package fileinfo import ( "fmt" "mime" + "os" "path/filepath" + "regexp" "strings" "github.com/h2non/filetype" @@ -99,6 +101,18 @@ func MimeFromFile(fname string) (mtype, ext string, err error) { return "", ext, fmt.Errorf("fileinfo.MimeFromFile could not find mime type for ext: %v file: %v", ext, fn) } +var generatedRe = regexp.MustCompile(`^// Code generated .* DO NOT EDIT`) + +func IsGeneratedFile(fname string) bool { + file, err := os.Open(fname) + if err != nil { + return false + } + head := make([]byte, 2048) + file.Read(head) + return generatedRe.Match(head) +} + // todo: use this to check against mime types! // MimeToKindMapInit makes sure the MimeToKindMap is initialized from @@ -316,7 +330,7 @@ var StandardMimes = []MimeType{ {"text/x-forth", []string{".frt"}, Code, Forth}, // note: ".fs" conflicts with fsharp {"text/x-fortran", []string{".f", ".F"}, Code, Fortran}, {"text/x-fsharp", []string{".fs", ".fsi"}, Code, FSharp}, - {"text/x-gosrc", []string{".go", ".mod", ".work", ".cosh"}, Code, Go}, + {"text/x-gosrc", []string{".go", ".mod", ".work", ".goal"}, Code, Go}, {"text/x-haskell", []string{".hs", ".lhs"}, Code, Haskell}, {"text/x-literate-haskell", nil, Code, Haskell}, // todo: not sure if same or not diff --git a/base/fileinfo/typegen.go b/base/fileinfo/typegen.go index 0b89bbbf00..e8f4015e30 100644 --- a/base/fileinfo/typegen.go +++ b/base/fileinfo/typegen.go @@ -6,4 +6,4 @@ import ( "cogentcore.org/core/types" ) -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/base/fileinfo.FileInfo", IDName: "file-info", Doc: "FileInfo represents the information about a given file / directory,\nincluding icon, mimetype, etc", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Methods: []types.Method{{Name: "Duplicate", Doc: "Duplicate creates a copy of given file -- only works for regular files, not\ndirectories.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Returns: []string{"string", "error"}}, {Name: "Delete", Doc: "Delete moves the file to the trash / recycling bin.\nOn mobile and web, it deletes it directly.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Returns: []string{"error"}}, {Name: "Rename", Doc: "Rename renames (moves) this file to given new path name.\nUpdates the FileInfo setting to the new name, although it might\nbe out of scope if it moved into a new path", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"path"}, Returns: []string{"newpath", "err"}}}, Fields: []types.Field{{Name: "Ic", Doc: "icon for file"}, {Name: "Name", Doc: "name of the file, without any path"}, {Name: "Size", Doc: "size of the file"}, {Name: "Kind", Doc: "type of file / directory; shorter, more user-friendly\nversion of mime type, based on category"}, {Name: "Mime", Doc: "full official mime type of the contents"}, {Name: "Cat", Doc: "functional category of the file, based on mime data etc"}, {Name: "Known", Doc: "known file type"}, {Name: "Mode", Doc: "file mode bits"}, {Name: "ModTime", Doc: 
"time that contents (only) were last modified"}, {Name: "VCS", Doc: "version control system status, when enabled"}, {Name: "Path", Doc: "full path to file, including name; for file functions"}}}) +var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/base/fileinfo.FileInfo", IDName: "file-info", Doc: "FileInfo represents the information about a given file / directory,\nincluding icon, mimetype, etc", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Methods: []types.Method{{Name: "Duplicate", Doc: "Duplicate creates a copy of given file -- only works for regular files, not\ndirectories.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Returns: []string{"string", "error"}}, {Name: "Delete", Doc: "Delete moves the file to the trash / recycling bin.\nOn mobile and web, it deletes it directly.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Returns: []string{"error"}}, {Name: "Rename", Doc: "Rename renames (moves) this file to given new path name.\nUpdates the FileInfo setting to the new name, although it might\nbe out of scope if it moved into a new path", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"path"}, Returns: []string{"newpath", "err"}}}, Fields: []types.Field{{Name: "Ic", Doc: "icon for file"}, {Name: "Name", Doc: "name of the file, without any path"}, {Name: "Size", Doc: "size of the file"}, {Name: "Kind", Doc: "type of file / directory; shorter, more user-friendly\nversion of mime type, based on category"}, {Name: "Mime", Doc: "full official mime type of the contents"}, {Name: "Cat", Doc: "functional category of the file, based on mime data etc"}, {Name: "Known", Doc: "known file type"}, {Name: "Mode", Doc: "file mode bits"}, {Name: "ModTime", Doc: "time that contents (only) were last modified"}, {Name: "VCS", Doc: "version control system status, when enabled"}, {Name: "Generated", Doc: "Generated indicates that the file is generated and should not be edited.\nFor Go files, this regex: `^// Code generated .* DO NOT EDIT\\.$` is used."}, {Name: "Path", Doc: "full path to file, including name; for file functions"}}}) diff --git a/base/fsx/fsx.go b/base/fsx/fsx.go index b757ee5f05..9fb0d865fa 100644 --- a/base/fsx/fsx.go +++ b/base/fsx/fsx.go @@ -17,6 +17,10 @@ import ( "time" ) +// Filename is used to open a file picker dialog when used as an argument +// type in a function, or as a field value. +type Filename string + // GoSrcDir tries to locate dir in GOPATH/src/ or GOROOT/src/pkg/ and returns its // full path. GOPATH may contain a list of paths. From Robin Elkind github.com/mewkiz/pkg. func GoSrcDir(dir string) (absDir string, err error) { diff --git a/base/generate/generate.go b/base/generate/generate.go index 4a41bd7e3a..c2446d2723 100644 --- a/base/generate/generate.go +++ b/base/generate/generate.go @@ -54,16 +54,28 @@ func PrintHeader(w io.Writer, pkg string, imports ...string) { } } -// Inspect goes through all of the files in the given package -// and calls the given function on each node in files that -// are not generated. The bool return value from the given function +// ExcludeFile returns true if the given file is on the exclude list. 
+func ExcludeFile(pkg *packages.Package, file *ast.File, exclude ...string) bool { + fpos := pkg.Fset.Position(file.FileStart) + _, fname := filepath.Split(fpos.Filename) + for _, ex := range exclude { + if fname == ex { + return true + } + } + return false +} + +// Inspect goes through all of the files in the given package, +// except those listed in the exclude list, and calls the given +// function on each node. The bool return value from the given function // indicates whether to continue traversing down the AST tree // of that node and look at its children. If a non-nil error value // is returned by the given function, the traversal of the tree is // stopped and the error value is returned. -func Inspect(pkg *packages.Package, f func(n ast.Node) (bool, error)) error { +func Inspect(pkg *packages.Package, f func(n ast.Node) (bool, error), exclude ...string) error { for _, file := range pkg.Syntax { - if ast.IsGenerated(file) { + if ExcludeFile(pkg, file, exclude...) { continue } var terr error diff --git a/base/iox/imagex/testing.go b/base/iox/imagex/testing.go index 1e2f3a91c5..28cec1db34 100644 --- a/base/iox/imagex/testing.go +++ b/base/iox/imagex/testing.go @@ -12,6 +12,8 @@ import ( "os" "path/filepath" "strings" + + "cogentcore.org/core/base/num" ) // TestingT is an interface wrapper around *testing.T @@ -56,6 +58,25 @@ func CompareColors(cc, ic color.RGBA, tol int) bool { return true } +// DiffImage returns the difference between two images, +// with pixels having the abs of the difference between pixels. +func DiffImage(a, b image.Image) image.Image { + ab := a.Bounds() + di := image.NewRGBA(ab) + for y := ab.Min.Y; y < ab.Max.Y; y++ { + for x := ab.Min.X; x < ab.Max.X; x++ { + cc := color.RGBAModel.Convert(a.At(x, y)).(color.RGBA) + ic := color.RGBAModel.Convert(b.At(x, y)).(color.RGBA) + r := uint8(num.Abs(int(cc.R) - int(ic.R))) + g := uint8(num.Abs(int(cc.G) - int(ic.G))) + b := uint8(num.Abs(int(cc.B) - int(ic.B))) + c := color.RGBA{r, g, b, 255} + di.Set(x, y, c) + } + } + return di +} + // Assert asserts that the given image is equivalent // to the image stored at the given filename in the testdata directory, // with ".png" added to the filename if there is no extension @@ -77,6 +98,7 @@ func Assert(t TestingT, img image.Image, filename string) { ext := filepath.Ext(filename) failFilename := strings.TrimSuffix(filename, ext) + ".fail" + ext + diffFilename := strings.TrimSuffix(filename, ext) + ".diff" + ext if UpdateTestImages { err := Save(img, filename) @@ -87,6 +109,7 @@ func Assert(t TestingT, img image.Image, filename string) { if err != nil { t.Errorf("AssertImage: error removing old fail image: %v", err) } + os.RemoveAll(diffFilename) return } @@ -133,10 +156,15 @@ func Assert(t TestingT, img image.Image, filename string) { if err != nil { t.Errorf("AssertImage: error saving fail image: %v", err) } + err = Save(DiffImage(img, fimg), diffFilename) + if err != nil { + t.Errorf("AssertImage: error saving diff image: %v", err) + } } else { err := os.RemoveAll(failFilename) if err != nil { t.Errorf("AssertImage: error removing old fail image: %v", err) } + os.RemoveAll(diffFilename) } } diff --git a/base/keylist/README.md b/base/keylist/README.md new file mode 100644 index 0000000000..c89691916a --- /dev/null +++ b/base/keylist/README.md @@ -0,0 +1,6 @@ +# keylist + +keylist implements an ordered list (slice) of items (Values), with a map from a Key (e.g., names) to indexes, to support fast lookup by name. There is also a Keys slice. 
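A minimal usage sketch, assuming the API defined in this package (not an official example):

```go
kl := keylist.New[string, int]()
kl.Add("a", 1)          // returns an error if "a" already exists
kl.Set("b", 2)          // map-like semantics: add or replace
v := kl.At("a")         // fast lookup by key
i := kl.IndexByKey("b") // index into the Keys and Values slices
_, _ = v, kl.Values[i]  // direct slice access to the values
```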
+ +This is a different implementation of the [ordmap](../ordmap) package, and has the advantage of direct slice access to the values, instead of having to go through the KeyValue tuple struct in ordmap. + diff --git a/base/keylist/keylist.go b/base/keylist/keylist.go new file mode 100644 index 0000000000..0f41d23e0f --- /dev/null +++ b/base/keylist/keylist.go @@ -0,0 +1,229 @@ +// Copyright (c) 2024, Cogent Core. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package keylist implements an ordered list (slice) of items, +with a map from a key (e.g., names) to indexes, +to support fast lookup by name. +This is a different implementation of the [ordmap] package, +that has separate slices for Values and Keys, instead of +using a tuple list of both. The awkwardness of value access +through the tuple is the major problem with ordmap. +*/ +package keylist + +import ( + "fmt" + "slices" +) + +// TODO: probably want to consolidate ordmap and keylist: https://github.com/cogentcore/core/issues/1224 + +// List implements an ordered list (slice) of Values, +// with a map from a key (e.g., names) to indexes, +// to support fast lookup by name. +type List[K comparable, V any] struct { //types:add + // List is the ordered slice of items. + Values []V + + // Keys is the ordered list of keys, in same order as [List.Values] + Keys []K + + // indexes is the key-to-index mapping. + indexes map[K]int +} + +// New returns a new [List]. The zero value +// is usable without initialization, so this is +// just a simple standard convenience method. +func New[K comparable, V any]() *List[K, V] { + return &List[K, V]{} +} + +func (kl *List[K, V]) makeIndexes() { + kl.indexes = make(map[K]int) +} + +// initIndexes ensures that the index map exists. +func (kl *List[K, V]) initIndexes() { + if kl.indexes == nil { + kl.makeIndexes() + } +} + +// Reset resets the list, removing any existing elements. +func (kl *List[K, V]) Reset() { + kl.Values = nil + kl.Keys = nil + kl.makeIndexes() +} + +// Set sets given key to given value, adding to the end of the list +// if not already present, and otherwise replacing with this new value. +// This is the same semantics as a Go map. +// See [List.Add] for version that only adds and does not replace. +func (kl *List[K, V]) Set(key K, val V) { + kl.initIndexes() + if idx, ok := kl.indexes[key]; ok { + kl.Values[idx] = val + kl.Keys[idx] = key + return + } + kl.indexes[key] = len(kl.Values) + kl.Values = append(kl.Values, val) + kl.Keys = append(kl.Keys, key) +} + +// Add adds an item to the list with given key, +// An error is returned if the key is already on the list. +// See [List.Set] for a method that automatically replaces. +func (kl *List[K, V]) Add(key K, val V) error { + kl.initIndexes() + if _, ok := kl.indexes[key]; ok { + return fmt.Errorf("keylist.Add: key %v is already on the list", key) + } + kl.indexes[key] = len(kl.Values) + kl.Values = append(kl.Values, val) + kl.Keys = append(kl.Keys, key) + return nil +} + +// Insert inserts the given value with the given key at the given index. +// This is relatively slow because it needs regenerate the keys list. +// It panics if the key already exists because the behavior is undefined +// in that situation. 
+func (kl *List[K, V]) Insert(idx int, key K, val V) { + if _, has := kl.indexes[key]; has { + panic("keylist.Add: key is already on the list") + } + + kl.Keys = slices.Insert(kl.Keys, idx, key) + kl.Values = slices.Insert(kl.Values, idx, val) + kl.makeIndexes() + for i, k := range kl.Keys { + kl.indexes[k] = i + } +} + +// At returns the value corresponding to the given key, +// with a zero value returned for a missing key. See [List.AtTry] +// for one that returns a bool for missing keys. +// For index-based access, use [List.Values] or [List.Keys] slices directly. +func (kl *List[K, V]) At(key K) V { + idx, ok := kl.indexes[key] + if ok { + return kl.Values[idx] + } + var zv V + return zv +} + +// AtTry returns the value corresponding to the given key, +// with false returned for a missing key, in case the zero value +// is not diagnostic. +func (kl *List[K, V]) AtTry(key K) (V, bool) { + idx, ok := kl.indexes[key] + if ok { + return kl.Values[idx], true + } + var zv V + return zv, false +} + +// IndexIsValid returns an error if the given index is invalid. +func (kl *List[K, V]) IndexIsValid(idx int) error { + if idx >= len(kl.Values) || idx < 0 { + return fmt.Errorf("keylist.List: IndexIsValid: index %d is out of range of a list of length %d", idx, len(kl.Values)) + } + return nil +} + +// IndexByKey returns the index of the given key, with a -1 for missing key. +func (kl *List[K, V]) IndexByKey(key K) int { + idx, ok := kl.indexes[key] + if !ok { + return -1 + } + return idx +} + +// Len returns the number of items in the list. +func (kl *List[K, V]) Len() int { + if kl == nil { + return 0 + } + return len(kl.Values) +} + +// DeleteByIndex deletes item(s) within the index range [i:j]. +// This is relatively slow because it needs to regenerate the +// index map. +func (kl *List[K, V]) DeleteByIndex(i, j int) { + ndel := j - i + if ndel <= 0 { + panic("index range is <= 0") + } + kl.Keys = slices.Delete(kl.Keys, i, j) + kl.Values = slices.Delete(kl.Values, i, j) + kl.makeIndexes() + for i, k := range kl.Keys { + kl.indexes[k] = i + } + +} + +// DeleteByKey deletes the item with the given key, +// returning false if it does not find it. +// This is relatively slow because it needs to regenerate the +// index map. +func (kl *List[K, V]) DeleteByKey(key K) bool { + idx, ok := kl.indexes[key] + if !ok { + return false + } + kl.DeleteByIndex(idx, idx+1) + return true +} + +// RenameIndex renames the item at given index to new key. +func (kl *List[K, V]) RenameIndex(i int, key K) { + old := kl.Keys[i] + delete(kl.indexes, old) + kl.Keys[i] = key + kl.indexes[key] = i +} + +// Copy copies all of the entries from the given key list +// into this list. It keeps existing entries in this +// list unless they also exist in the given list, in which case +// they are overwritten. Use [List.Reset] first to get an exact copy. +func (kl *List[K, V]) Copy(from *List[K, V]) { + for i, v := range from.Values { + kl.Set(kl.Keys[i], v) + } +} + +// String returns a string representation of the list. +func (kl *List[K, V]) String() string { + sv := "{" + for i, v := range kl.Values { + sv += fmt.Sprintf("%v", kl.Keys[i]) + ": " + fmt.Sprintf("%v", v) + ", " + } + sv += "}" + return sv +} + +/* +// GoString returns the list as Go code. 
+func (kl *List[K, V]) GoString() string { + var zk K + var zv V + res := fmt.Sprintf("ordlist.Make([]ordlist.KeyVal[%T, %T]{\n", zk, zv) + for _, kv := range kl.Order { + res += fmt.Sprintf("{%#v, %#v},\n", kv.Key, kv.Value) + } + res += "})" + return res +} +*/ diff --git a/base/keylist/keylist_test.go b/base/keylist/keylist_test.go new file mode 100644 index 0000000000..b88876ecf0 --- /dev/null +++ b/base/keylist/keylist_test.go @@ -0,0 +1,39 @@ +// Copyright (c) 2024, Cogent Core. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keylist + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestKeyList(t *testing.T) { + kl := New[string, int]() + kl.Add("key0", 0) + kl.Add("key1", 1) + kl.Add("key2", 2) + + assert.Equal(t, 1, kl.At("key1")) + assert.Equal(t, 2, kl.IndexByKey("key2")) + + assert.Equal(t, 1, kl.Values[1]) + + assert.Equal(t, 3, kl.Len()) + + kl.DeleteByIndex(1, 2) + assert.Equal(t, 2, kl.Values[1]) + assert.Equal(t, 1, kl.IndexByKey("key2")) + + kl.Insert(0, "new0", 3) + assert.Equal(t, 3, kl.Values[0]) + assert.Equal(t, 0, kl.Values[1]) + assert.Equal(t, 2, kl.IndexByKey("key2")) + + // nm := Make([]KeyValue[string, int]{{"one", 1}, {"two", 2}, {"three", 3}}) + // assert.Equal(t, 3, nm.Values[2]) + // assert.Equal(t, 2, nm.Values[1]) + // assert.Equal(t, 3, nm.At("three")) +} diff --git a/base/metadata/metadata.go b/base/metadata/metadata.go index 1fbc1852e2..a2f964ed79 100644 --- a/base/metadata/metadata.go +++ b/base/metadata/metadata.go @@ -2,15 +2,27 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Package metadata provides a map of named any elements +// with generic support for type-safe Get and nil-safe Set. +// Metadata keys often function as optional fields in a struct, +// and therefore a CamelCase naming convention is typical. +// Provides default support for "Name", "Doc", "File" standard keys. package metadata import ( "fmt" "maps" + + "cogentcore.org/core/base/errors" ) // Data is metadata as a map of named any elements // with generic support for type-safe Get and nil-safe Set. +// Metadata keys often function as optional fields in a struct, +// and therefore a CamelCase naming convention is typical. +// Provides default support for "Name" and "Doc" standard keys. +// In general it is good practice to provide access functions +// that establish standard key names, to avoid issues with typos. type Data map[string]any func (md *Data) init() { @@ -26,8 +38,8 @@ func (md *Data) Set(key string, value any) { (*md)[key] = value } -// Get gets metadata value of given type. -// returns error if not present or item is a different type. +// Get gets metadata value of given type from given Data. +// Returns error if not present or item is a different type. func Get[T any](md Data, key string) (T, error) { var z T x, ok := md[key] @@ -41,14 +53,73 @@ func Get[T any](md Data, key string) (T, error) { return v, nil } -// Copy does a shallow copy of metadata from source. +// CopyFrom does a shallow copy of metadata from source. // Any pointer-based values will still point to the same // underlying data as the source, but the two maps remain // distinct. It uses [maps.Copy]. 
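// A hedged usage sketch of the API added in this file (the Item type and the
// key name are illustrative only): any type exposing a Metadata() *Data method
// can be used with SetTo and GetFrom.
//
//	type Item struct{ Meta Data }
//
//	func (it *Item) Metadata() *Data { return &it.Meta }
//
//	it := &Item{}
//	SetTo(it, "Name", "demo")                // nil-safe set via Metadataer
//	name, err := GetFrom[string](it, "Name") // type-safe get
//	_, _ = name, err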
-func (md *Data) Copy(src Data) { +func (md *Data) CopyFrom(src Data) { if src == nil { return } md.init() maps.Copy(*md, src) } + +//////// Metadataer + +// Metadataer is an interface for a type that returns associated +// metadata.Data using a Metadata() method. To be able to set metadata, +// the method should be defined with a pointer receiver. +type Metadataer interface { + Metadata() *Data +} + +// GetData gets the Data from given object, if it implements the +// Metadata() method. Returns nil if it does not. +// Must pass a pointer to the object. +func GetData(obj any) *Data { + if md, ok := obj.(Metadataer); ok { + return md.Metadata() + } + return nil +} + +// GetFrom gets metadata value of given type from given object, +// if it implements the Metadata() method. +// Must pass a pointer to the object. +// Returns error if not present or item is a different type. +func GetFrom[T any](obj any, key string) (T, error) { + md := GetData(obj) + if md == nil { + var zv T + return zv, errors.New("metadata not available for given object type") + } + return Get[T](*md, key) +} + +// SetTo sets metadata value on given object, if it implements +// the Metadata() method. Returns error if no Metadata on object. +// Must pass a pointer to the object. +func SetTo(obj any, key string, value any) error { + md := GetData(obj) + if md == nil { + return errors.Log(errors.New("metadata not available for given object type")) + } + md.Set(key, value) + return nil +} + +// CopyFrom copies metadata from source +// Must pass a pointer to the object. +func CopyFrom(to, src any) *Data { + tod := GetData(to) + if tod == nil { + return nil + } + srcd := GetData(src) + if srcd == nil { + return tod + } + tod.CopyFrom(*srcd) + return tod +} diff --git a/base/metadata/metadata_test.go b/base/metadata/metadata_test.go new file mode 100644 index 0000000000..43b2735020 --- /dev/null +++ b/base/metadata/metadata_test.go @@ -0,0 +1,32 @@ +// Copyright (c) 2024, Cogent Core. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type mytest struct { + Meta Data +} + +func (mt *mytest) Metadata() *Data { + return &mt.Meta +} + +func TestMetadata(t *testing.T) { + mt := &mytest{} + + SetName(mt, "test") + assert.Equal(t, "test", Name(mt)) + + SetDoc(mt, "this is good") + assert.Equal(t, "this is good", Doc(mt)) + + SetFilename(mt, "path/me.go") + assert.Equal(t, "path/me.go", Filename(mt)) +} diff --git a/base/metadata/std.go b/base/metadata/std.go new file mode 100644 index 0000000000..74eca83052 --- /dev/null +++ b/base/metadata/std.go @@ -0,0 +1,51 @@ +// Copyright (c) 2024, Cogent Core. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package metadata + +import "os" + +// SetName sets the "Name" standard key. +func SetName(obj any, name string) { + SetTo(obj, "Name", name) +} + +// Name returns the "Name" standard key value (empty if not set). +func Name(obj any) string { + nm, _ := GetFrom[string](obj, "Name") + return nm +} + +// SetDoc sets the "Doc" standard key. +func SetDoc(obj any, doc string) { + SetTo(obj, "Doc", doc) +} + +// Doc returns the "Doc" standard key value (empty if not set). +func Doc(obj any) string { + doc, _ := GetFrom[string](obj, "Doc") + return doc +} + +// SetFile sets the "File" standard key for *os.File. 
+func SetFile(obj any, file *os.File) { + SetTo(obj, "File", file) +} + +// File returns the "File" standard key value (nil if not set). +func File(obj any) *os.File { + doc, _ := GetFrom[*os.File](obj, "File") + return doc +} + +// SetFilename sets the "Filename" standard key. +func SetFilename(obj any, file string) { + SetTo(obj, "Filename", file) +} + +// Filename returns the "Filename" standard key value (empty if not set). +func Filename(obj any) string { + doc, _ := GetFrom[string](obj, "Filename") + return doc +} diff --git a/base/mpi/README.md b/base/mpi/README.md deleted file mode 100644 index f874c65452..0000000000 --- a/base/mpi/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# mpi - -`mpi` contains Go wrappers around the MPI message passing interface for distributed memory computation. This has no other dependencies and uses code generation to provide support for all Go types. - -The default without any build tags is to build a dummy version that has 1 proc of rank 0 always, and nop versions of all the methods. - -There are two supported versions of mpi, selected using the corresponding build tag: -* `mpi` = [open mpi](https://www.open-mpi.org/), installed with a [pkg-config](https://en.wikipedia.org/wiki/Pkg-config) named `ompi` -* `mpich` = [mpich](https://www.mpich.org/), installed with a `pkg-config` named `mpich` - -It is possible to add other versions by adding another build tag and adding the corresponding line to the `#cgo` directives in the relevant files in the `mpi` package. - -For example, to build with open mpi, build your program like this: -```sh -$ go build -tags mpi -``` - -# Install - -On a mac, you can use `brew install open-mpi` or `brew install mpich` to install. Corresponding package manager versions are presumably available on linux, and mpi is usually already supported on HPC clusters. - -# Development - -After updating any of the template files, you need to update the generated go files like so: -```bash -cd mpi -go install github.com/apache/arrow/go/arrow/_tools/tmpl@latest -make generate -``` diff --git a/base/mpi/doc.go b/base/mpi/doc.go deleted file mode 100644 index 7d2d51b5ea..0000000000 --- a/base/mpi/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mpi wraps the Message Passing Interface for distributed memory -data sharing across a collection of processors (procs). - -The wrapping code was initially copied from https://github.com/cpmech/gosl/mpi -and significantly modified. - -All standard Go types are supported using the apache arrow tmpl generation tool. -Int is assumed to be 64bit and is defined as a []int because that is typically -more convenient. - -Use the build tags: -tags mpi for open mpi, and -tags mpich for mpich -Without tags, a "dummy" implementation is present, which has 1 proc of -rank 0 always, and nop versions of all the methods. -*/ -package mpi diff --git a/base/mpi/dummy.gen.go b/base/mpi/dummy.gen.go deleted file mode 100644 index 4728577dcf..0000000000 --- a/base/mpi/dummy.gen.go +++ /dev/null @@ -1,672 +0,0 @@ -// Code generated by dummy.gen.go.tmpl. DO NOT EDIT. - -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !mpi && !mpich - -package mpi - -// SendF64 sends values to toProc, using given unique tag identifier. -// This is Blocking. 
Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendF64(toProc int, tag int, vals []float64) error { - return nil -} - -// Recv64F64 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvF64(fmProc int, tag int, vals []float64) error { - return nil -} - -// BcastF64 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastF64(fmProc int, vals []float64) error { - return nil -} - -// ReduceF64 reduces all values across procs to toProc in orig to dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceF64(toProc int, op Op, dest, orig []float64) error { - return nil -} - -// AllReduceF64 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllReduceF64(op Op, dest, orig []float64) error { - return nil -} - -// GatherF64 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherF64(toProc int, dest, orig []float64) error { - return nil -} - -// AllGatherF64 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherF64(dest, orig []float64) error { - return nil -} - -// ScatterF64 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterF64(fmProc int, dest, orig []float64) error { - return nil -} - -// SendF32 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendF32(toProc int, tag int, vals []float32) error { - return nil -} - -// Recv64F32 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvF32(fmProc int, tag int, vals []float32) error { - return nil -} - -// BcastF32 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastF32(fmProc int, vals []float32) error { - return nil -} - -// ReduceF32 reduces all values across procs to toProc in orig to dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceF32(toProc int, op Op, dest, orig []float32) error { - return nil -} - -// AllReduceF32 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllReduceF32(op Op, dest, orig []float32) error { - return nil -} - -// GatherF32 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// IMPORTANT: orig and dest must be different slices. 
-func (cm *Comm) GatherF32(toProc int, dest, orig []float32) error { - return nil -} - -// AllGatherF32 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherF32(dest, orig []float32) error { - return nil -} - -// ScatterF32 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterF32(fmProc int, dest, orig []float32) error { - return nil -} - -// SendInt sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendInt(toProc int, tag int, vals []int) error { - return nil -} - -// Recv64Int receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvInt(fmProc int, tag int, vals []int) error { - return nil -} - -// BcastInt broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastInt(fmProc int, vals []int) error { - return nil -} - -// ReduceInt reduces all values across procs to toProc in orig to dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceInt(toProc int, op Op, dest, orig []int) error { - return nil -} - -// AllReduceInt reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllReduceInt(op Op, dest, orig []int) error { - return nil -} - -// GatherInt gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherInt(toProc int, dest, orig []int) error { - return nil -} - -// AllGatherInt gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherInt(dest, orig []int) error { - return nil -} - -// ScatterInt scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterInt(fmProc int, dest, orig []int) error { - return nil -} - -// SendI64 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendI64(toProc int, tag int, vals []int64) error { - return nil -} - -// Recv64I64 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvI64(fmProc int, tag int, vals []int64) error { - return nil -} - -// BcastI64 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastI64(fmProc int, vals []int64) error { - return nil -} - -// ReduceI64 reduces all values across procs to toProc in orig to dest using given operation. 
-// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceI64(toProc int, op Op, dest, orig []int64) error { - return nil -} - -// AllReduceI64 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllReduceI64(op Op, dest, orig []int64) error { - return nil -} - -// GatherI64 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherI64(toProc int, dest, orig []int64) error { - return nil -} - -// AllGatherI64 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherI64(dest, orig []int64) error { - return nil -} - -// ScatterI64 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterI64(fmProc int, dest, orig []int64) error { - return nil -} - -// SendU64 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendU64(toProc int, tag int, vals []uint64) error { - return nil -} - -// Recv64U64 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvU64(fmProc int, tag int, vals []uint64) error { - return nil -} - -// BcastU64 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastU64(fmProc int, vals []uint64) error { - return nil -} - -// ReduceU64 reduces all values across procs to toProc in orig to dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceU64(toProc int, op Op, dest, orig []uint64) error { - return nil -} - -// AllReduceU64 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllReduceU64(op Op, dest, orig []uint64) error { - return nil -} - -// GatherU64 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherU64(toProc int, dest, orig []uint64) error { - return nil -} - -// AllGatherU64 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherU64(dest, orig []uint64) error { - return nil -} - -// ScatterU64 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterU64(fmProc int, dest, orig []uint64) error { - return nil -} - -// SendI32 sends values to toProc, using given unique tag identifier. -// This is Blocking. 
Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendI32(toProc int, tag int, vals []int32) error { - return nil -} - -// Recv64I32 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvI32(fmProc int, tag int, vals []int32) error { - return nil -} - -// BcastI32 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastI32(fmProc int, vals []int32) error { - return nil -} - -// ReduceI32 reduces all values across procs to toProc in orig to dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceI32(toProc int, op Op, dest, orig []int32) error { - return nil -} - -// AllReduceI32 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllReduceI32(op Op, dest, orig []int32) error { - return nil -} - -// GatherI32 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherI32(toProc int, dest, orig []int32) error { - return nil -} - -// AllGatherI32 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherI32(dest, orig []int32) error { - return nil -} - -// ScatterI32 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterI32(fmProc int, dest, orig []int32) error { - return nil -} - -// SendU32 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendU32(toProc int, tag int, vals []uint32) error { - return nil -} - -// Recv64U32 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvU32(fmProc int, tag int, vals []uint32) error { - return nil -} - -// BcastU32 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastU32(fmProc int, vals []uint32) error { - return nil -} - -// ReduceU32 reduces all values across procs to toProc in orig to dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceU32(toProc int, op Op, dest, orig []uint32) error { - return nil -} - -// AllReduceU32 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllReduceU32(op Op, dest, orig []uint32) error { - return nil -} - -// GatherU32 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// IMPORTANT: orig and dest must be different slices. 
-func (cm *Comm) GatherU32(toProc int, dest, orig []uint32) error { - return nil -} - -// AllGatherU32 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherU32(dest, orig []uint32) error { - return nil -} - -// ScatterU32 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterU32(fmProc int, dest, orig []uint32) error { - return nil -} - -// SendI16 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendI16(toProc int, tag int, vals []int16) error { - return nil -} - -// Recv64I16 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvI16(fmProc int, tag int, vals []int16) error { - return nil -} - -// BcastI16 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastI16(fmProc int, vals []int16) error { - return nil -} - -// ReduceI16 reduces all values across procs to toProc in orig to dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceI16(toProc int, op Op, dest, orig []int16) error { - return nil -} - -// AllReduceI16 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllReduceI16(op Op, dest, orig []int16) error { - return nil -} - -// GatherI16 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherI16(toProc int, dest, orig []int16) error { - return nil -} - -// AllGatherI16 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherI16(dest, orig []int16) error { - return nil -} - -// ScatterI16 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterI16(fmProc int, dest, orig []int16) error { - return nil -} - -// SendU16 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendU16(toProc int, tag int, vals []uint16) error { - return nil -} - -// Recv64U16 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvU16(fmProc int, tag int, vals []uint16) error { - return nil -} - -// BcastU16 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastU16(fmProc int, vals []uint16) error { - return nil -} - -// ReduceU16 reduces all values across procs to toProc in orig to dest using given operation. 
-// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceU16(toProc int, op Op, dest, orig []uint16) error { - return nil -} - -// AllReduceU16 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllReduceU16(op Op, dest, orig []uint16) error { - return nil -} - -// GatherU16 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherU16(toProc int, dest, orig []uint16) error { - return nil -} - -// AllGatherU16 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherU16(dest, orig []uint16) error { - return nil -} - -// ScatterU16 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterU16(fmProc int, dest, orig []uint16) error { - return nil -} - -// SendI8 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendI8(toProc int, tag int, vals []int8) error { - return nil -} - -// Recv64I8 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvI8(fmProc int, tag int, vals []int8) error { - return nil -} - -// BcastI8 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastI8(fmProc int, vals []int8) error { - return nil -} - -// ReduceI8 reduces all values across procs to toProc in orig to dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceI8(toProc int, op Op, dest, orig []int8) error { - return nil -} - -// AllReduceI8 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllReduceI8(op Op, dest, orig []int8) error { - return nil -} - -// GatherI8 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherI8(toProc int, dest, orig []int8) error { - return nil -} - -// AllGatherI8 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherI8(dest, orig []int8) error { - return nil -} - -// ScatterI8 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterI8(fmProc int, dest, orig []int8) error { - return nil -} - -// SendU8 sends values to toProc, using given unique tag identifier. -// This is Blocking. 
Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendU8(toProc int, tag int, vals []uint8) error { - return nil -} - -// Recv64U8 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvU8(fmProc int, tag int, vals []uint8) error { - return nil -} - -// BcastU8 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastU8(fmProc int, vals []uint8) error { - return nil -} - -// ReduceU8 reduces all values across procs to toProc in orig to dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceU8(toProc int, op Op, dest, orig []uint8) error { - return nil -} - -// AllReduceU8 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllReduceU8(op Op, dest, orig []uint8) error { - return nil -} - -// GatherU8 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherU8(toProc int, dest, orig []uint8) error { - return nil -} - -// AllGatherU8 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherU8(dest, orig []uint8) error { - return nil -} - -// ScatterU8 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterU8(fmProc int, dest, orig []uint8) error { - return nil -} - -// SendC128 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendC128(toProc int, tag int, vals []complex128) error { - return nil -} - -// Recv64C128 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvC128(fmProc int, tag int, vals []complex128) error { - return nil -} - -// BcastC128 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastC128(fmProc int, vals []complex128) error { - return nil -} - -// ReduceC128 reduces all values across procs to toProc in orig to dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceC128(toProc int, op Op, dest, orig []complex128) error { - return nil -} - -// AllReduceC128 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllReduceC128(op Op, dest, orig []complex128) error { - return nil -} - -// GatherC128 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// IMPORTANT: orig and dest must be different slices. 
-func (cm *Comm) GatherC128(toProc int, dest, orig []complex128) error { - return nil -} - -// AllGatherC128 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherC128(dest, orig []complex128) error { - return nil -} - -// ScatterC128 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterC128(fmProc int, dest, orig []complex128) error { - return nil -} - -// SendC64 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendC64(toProc int, tag int, vals []complex64) error { - return nil -} - -// Recv64C64 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvC64(fmProc int, tag int, vals []complex64) error { - return nil -} - -// BcastC64 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastC64(fmProc int, vals []complex64) error { - return nil -} - -// ReduceC64 reduces all values across procs to toProc in orig to dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceC64(toProc int, op Op, dest, orig []complex64) error { - return nil -} - -// AllReduceC64 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllReduceC64(op Op, dest, orig []complex64) error { - return nil -} - -// GatherC64 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherC64(toProc int, dest, orig []complex64) error { - return nil -} - -// AllGatherC64 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherC64(dest, orig []complex64) error { - return nil -} - -// ScatterC64 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterC64(fmProc int, dest, orig []complex64) error { - return nil -} diff --git a/base/mpi/dummy.go b/base/mpi/dummy.go deleted file mode 100644 index 88d582dc62..0000000000 --- a/base/mpi/dummy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !mpi && !mpich - -package mpi - -// this file provides dummy versions, built by default, so mpi can be included -// generically without incurring additional complexity. 
- -// set LogErrors to control whether MPI errors are automatically logged or not -var LogErrors = true - -// Op is an aggregation operation: Sum, Min, Max, etc -type Op int - -const ( - OpSum Op = iota - OpMax - OpMin - OpProd - OpLAND // logical AND - OpLOR // logical OR - OpBAND // bitwise AND - OpBOR // bitwise OR -) - -const ( - // Root is the rank 0 node -- it is more semantic to use this - Root int = 0 -) - -// IsOn tells whether MPI is on or not -// -// NOTE: this returns true even after Stop -func IsOn() bool { - return false -} - -// Init initialises MPI -func Init() { -} - -// InitThreadSafe initialises MPI thread safe -func InitThreadSafe() error { - return nil -} - -// Finalize finalises MPI (frees resources, shuts it down) -func Finalize() { -} - -// WorldRank returns this proc's rank/ID within the World communicator. -// Returns 0 if not yet initialized, so it is always safe to call. -func WorldRank() (rank int) { - return 0 -} - -// WorldSize returns the number of procs in the World communicator. -// Returns 1 if not yet initialized, so it is always safe to call. -func WorldSize() (size int) { - return 1 -} - -// Comm is the MPI communicator -- all MPI communication operates as methods -// on this struct. It holds the MPI_Comm communicator and MPI_Group for -// sub-World group communication. -type Comm struct { -} - -// NewComm creates a new communicator. -// if ranks is nil, communicator is for World (all active procs). -// otherwise, defined a group-level commuicator for given ranks. -func NewComm(ranks []int) (*Comm, error) { - cm := &Comm{} - return cm, nil -} - -// Rank returns the rank/ID for this proc -func (cm *Comm) Rank() (rank int) { - return 0 -} - -// Size returns the number of procs in this communicator -func (cm *Comm) Size() (size int) { - return 1 -} - -// Abort aborts MPI -func (cm *Comm) Abort() error { - return nil -} - -// Barrier forces synchronisation -func (cm *Comm) Barrier() error { - return nil -} diff --git a/base/mpi/mpi.go b/base/mpi/mpi.go deleted file mode 100644 index 1c3aeea744..0000000000 --- a/base/mpi/mpi.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copied and significantly modified from: https://github.com/cpmech/gosl/mpi -// Copyright 2016 The Gosl Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
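The dummy Comm above also mirrors the group-communicator API, so a sketch of sub-group creation compiles under either build (it reuses the mpi import from the previous sketch; the even/odd split is purely illustrative):

// evenGroup builds a communicator over the even world ranks and reports its
// size from the root. Every proc calls NewComm, since communicator creation
// is collective in standard MPI; only the listed ranks should then use the
// resulting Comm.
func evenGroup() error {
	var evens []int
	for r := 0; r < mpi.WorldSize(); r += 2 {
		evens = append(evens, r)
	}
	evenComm, err := mpi.NewComm(evens)
	if err != nil {
		return err
	}
	if mpi.WorldRank()%2 == 0 {
		mpi.Printf("even group has %d procs\n", evenComm.Size())
	}
	return nil
}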
- -//go:build mpi || mpich - -package mpi - -/* -#cgo mpi pkg-config: ompi -#cgo mpich pkg-config: mpich -#include "mpi.h" - -MPI_Comm World = MPI_COMM_WORLD; -*/ -import "C" - -import ( - "fmt" - "log" - "unsafe" -) - -// set LogErrors to control whether MPI errors are automatically logged or not -var LogErrors = true - -// Error takes an MPI error code and returns an appropriate error -// value -- either nil if no error, or the MPI error message -// with given context -func Error(ec C.int, ctxt string) error { - if ec == C.MPI_SUCCESS { - return nil - } - var rsz C.int - str := C.malloc(C.size_t(C.MPI_MAX_ERROR_STRING)) - - C.MPI_Error_string(C.int(ec), (*C.char)(str), &rsz) - gstr := C.GoStringN((*C.char)(str), rsz) - // C.free(str) - err := fmt.Errorf("MPI Error: %d %s %s", ec, ctxt, gstr) - if LogErrors { - log.Println(err) - } - return err -} - -// Op is an aggregation operation: Sum, Min, Max, etc -type Op int - -const ( - OpSum Op = iota - OpMax - OpMin - OpProd // Product - OpLAND // logical AND - OpLOR // logical OR - OpBAND // bitwise AND - OpBOR // bitwise OR - -) - -func (op Op) ToC() C.MPI_Op { - switch op { - case OpSum: - return C.MPI_SUM - case OpMax: - return C.MPI_MAX - case OpMin: - return C.MPI_MIN - case OpProd: - return C.MPI_PROD - case OpLAND: - return C.MPI_LAND - case OpLOR: - return C.MPI_LOR - case OpBAND: - return C.MPI_BAND - case OpBOR: - return C.MPI_BOR - } - return C.MPI_SUM -} - -const ( - // Root is the rank 0 node -- it is more semantic to use this - Root int = 0 -) - -// IsOn tells whether MPI is on or not -// -// NOTE: this returns true even after Stop -func IsOn() bool { - var flag C.int - C.MPI_Initialized(&flag) - if flag != 0 { - return true - } - return false -} - -// Init initialises MPI -func Init() { - C.MPI_Init(nil, nil) -} - -// InitThreadSafe initialises MPI thread safe -func InitThreadSafe() error { - var r int32 - C.MPI_Init_thread(nil, nil, C.MPI_THREAD_MULTIPLE, (*C.int)(unsafe.Pointer(&r))) - if r != C.MPI_THREAD_MULTIPLE { - return fmt.Errorf("MPI_THREAD_MULTIPLE can't be set: got %d", r) - } - return nil -} - -// Finalize finalises MPI (frees resources, shuts it down) -func Finalize() { - C.MPI_Finalize() -} - -// WorldRank returns this proc's rank/ID within the World communicator. -// Returns 0 if not yet initialized, so it is always safe to call. -func WorldRank() (rank int) { - if !IsOn() { - return 0 - } - var r int32 - C.MPI_Comm_rank(C.World, (*C.int)(unsafe.Pointer(&r))) - return int(r) -} - -// WorldSize returns the number of procs in the World communicator. -// Returns 1 if not yet initialized, so it is always safe to call. -func WorldSize() (size int) { - if !IsOn() { - return 1 - } - var s int32 - C.MPI_Comm_size(C.World, (*C.int)(unsafe.Pointer(&s))) - return int(s) -} - -// Comm is the MPI communicator -- all MPI communication operates as methods -// on this struct. It holds the MPI_Comm communicator and MPI_Group for -// sub-World group communication. -type Comm struct { - comm C.MPI_Comm - group C.MPI_Group -} - -// NewComm creates a new communicator. -// if ranks is nil, communicator is for World (all active procs). -// otherwise, defined a group-level commuicator for given ranks. 
-func NewComm(ranks []int) (*Comm, error) { - cm := &Comm{} - if len(ranks) == 0 { - cm.comm = C.World - return cm, Error(C.MPI_Comm_group(C.World, &cm.group), "MPI_Comm_group") - } - rs := make([]int32, len(ranks)) - for i := 0; i < len(ranks); i++ { - rs[i] = int32(ranks[i]) - } - n := C.int(len(ranks)) - r := (*C.int)(unsafe.Pointer(&rs[0])) - var wgroup C.MPI_Group - C.MPI_Comm_group(C.World, &wgroup) - C.MPI_Group_incl(wgroup, n, r, &cm.group) - return cm, Error(C.MPI_Comm_create(C.World, cm.group, &cm.comm), "Comm_create") -} - -// Rank returns the rank/ID for this proc -func (cm *Comm) Rank() (rank int) { - var r int32 - C.MPI_Comm_rank(cm.comm, (*C.int)(unsafe.Pointer(&r))) - return int(r) -} - -// Size returns the number of procs in this communicator -func (cm *Comm) Size() (size int) { - var s int32 - C.MPI_Comm_size(cm.comm, (*C.int)(unsafe.Pointer(&s))) - return int(s) -} - -// Abort aborts MPI -func (cm *Comm) Abort() error { - return Error(C.MPI_Abort(cm.comm, 0), "Abort") -} - -// Barrier forces synchronisation -func (cm *Comm) Barrier() error { - return Error(C.MPI_Barrier(cm.comm), "Barrier") -} diff --git a/base/mpi/numeric.gen.go b/base/mpi/numeric.gen.go deleted file mode 100644 index ffd4f37df7..0000000000 --- a/base/mpi/numeric.gen.go +++ /dev/null @@ -1,1099 +0,0 @@ -// Code generated by numeric.gen.go.tmpl. DO NOT EDIT. - -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build mpi || mpich - -package mpi - -/* -#cgo mpi pkg-config: ompi -#cgo mpich pkg-config: mpich -#include "mpi.h" - -MPI_Datatype FLOAT64 = MPI_DOUBLE; -MPI_Datatype FLOAT32 = MPI_FLOAT; -MPI_Datatype INT64 = MPI_LONG; -MPI_Datatype UINT64 = MPI_UNSIGNED_LONG; -MPI_Datatype INT32 = MPI_INT; -MPI_Datatype UINT32 = MPI_UNSIGNED; -MPI_Datatype INT16 = MPI_SHORT; -MPI_Datatype UINT16 = MPI_UNSIGNED_SHORT; -MPI_Datatype BYTE = MPI_BYTE; -MPI_Datatype COMPLEX128 = MPI_DOUBLE_COMPLEX; -MPI_Datatype COMPLEX64 = MPI_COMPLEX; -MPI_Status* StIgnore = MPI_STATUS_IGNORE; -*/ -import "C" - -import ( - "unsafe" -) - -// SendF64 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendF64(toProc int, tag int, vals []float64) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.FLOAT64, C.int(toProc), C.int(tag), cm.comm), "SendF64") -} - -// Recv64F64 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvF64(fmProc int, tag int, vals []float64) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.FLOAT64, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "RecvF64") -} - -// BcastF64 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastF64(fmProc int, vals []float64) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.FLOAT64, C.int(fmProc), cm.comm), "BcastF64") -} - -// ReduceF64 reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. 
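Send and Recv are blocking and are matched purely by the (proc, tag) pair, so both sides must agree on the tag and on the slice length. A hedged sketch of a one-way transfer between ranks 0 and 1 (the tag value 42 and the function name are arbitrary):

// sendOnce moves a block of float64s from rank 0 to rank 1 over the given
// communicator; both sides use the same tag and the same length.
func sendOnce(cm *mpi.Comm) error {
	const tag = 42
	vals := make([]float64, 16)
	switch cm.Rank() {
	case 0:
		for i := range vals {
			vals[i] = float64(i)
		}
		return cm.SendF64(1, tag, vals) // blocks until matched by rank 1's Recv
	case 1:
		return cm.RecvF64(0, tag, vals) // blocks until rank 0's Send arrives
	}
	return nil
}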
-// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceF64(toProc int, op Op, dest, orig []float64) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.FLOAT64, op.ToC(), C.int(toProc), cm.comm), "ReduceF64") -} - -// AllReduceF64 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduceF64(op Op, dest, orig []float64) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.FLOAT64, op.ToC(), cm.comm), "AllReduceF64") -} - -// GatherF64 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherF64(toProc int, dest, orig []float64) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.FLOAT64, recvbuf, C.int(len(orig)), C.FLOAT64, C.int(toProc), cm.comm), "GatherF64") -} - -// AllGatherF64 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherF64(dest, orig []float64) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.FLOAT64, recvbuf, C.int(len(orig)), C.FLOAT64, cm.comm), "GatherF64") -} - -// ScatterF64 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterF64(fmProc int, dest, orig []float64) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.FLOAT64, recvbuf, C.int(len(dest)), C.FLOAT64, C.int(fmProc), cm.comm), "GatherF64") -} - -// SendF32 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendF32(toProc int, tag int, vals []float32) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.FLOAT32, C.int(toProc), C.int(tag), cm.comm), "SendF32") -} - -// Recv64F32 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvF32(fmProc int, tag int, vals []float32) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.FLOAT32, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "RecvF32") -} - -// BcastF32 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. 
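AllReduce is the collective most commonly used to synchronize accumulated values, and passing nil for orig selects the MPI_IN_PLACE path so a single slice serves as both input and output. A small sketch of both calling forms:

// sumAcrossProcs shows both calling forms of AllReduceF64; each call leaves
// every proc holding the element-wise OpSum of the procs' inputs.
func sumAcrossProcs(cm *mpi.Comm, local []float64) ([]float64, error) {
	// Out-of-place form: dest and orig are distinct slices.
	dest := make([]float64, len(local))
	if err := cm.AllReduceF64(mpi.OpSum, dest, local); err != nil {
		return nil, err
	}
	// In-place form: orig == nil, so local is both send and receive buffer.
	if err := cm.AllReduceF64(mpi.OpSum, local, nil); err != nil {
		return nil, err
	}
	return dest, nil
}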
-func (cm *Comm) BcastF32(fmProc int, vals []float32) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.FLOAT32, C.int(fmProc), cm.comm), "BcastF32") -} - -// ReduceF32 reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceF32(toProc int, op Op, dest, orig []float32) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.FLOAT32, op.ToC(), C.int(toProc), cm.comm), "ReduceF32") -} - -// AllReduceF32 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduceF32(op Op, dest, orig []float32) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.FLOAT32, op.ToC(), cm.comm), "AllReduceF32") -} - -// GatherF32 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherF32(toProc int, dest, orig []float32) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.FLOAT32, recvbuf, C.int(len(orig)), C.FLOAT32, C.int(toProc), cm.comm), "GatherF32") -} - -// AllGatherF32 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherF32(dest, orig []float32) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.FLOAT32, recvbuf, C.int(len(orig)), C.FLOAT32, cm.comm), "GatherF32") -} - -// ScatterF32 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterF32(fmProc int, dest, orig []float32) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.FLOAT32, recvbuf, C.int(len(dest)), C.FLOAT32, C.int(fmProc), cm.comm), "GatherF32") -} - -// SendInt sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendInt(toProc int, tag int, vals []int) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.INT64, C.int(toProc), C.int(tag), cm.comm), "SendInt") -} - -// Recv64Int receives values from proc fmProc, using given unique tag identifier -// This is Blocking. 
Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvInt(fmProc int, tag int, vals []int) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.INT64, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "RecvInt") -} - -// BcastInt broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastInt(fmProc int, vals []int) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.INT64, C.int(fmProc), cm.comm), "BcastInt") -} - -// ReduceInt reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceInt(toProc int, op Op, dest, orig []int) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.INT64, op.ToC(), C.int(toProc), cm.comm), "ReduceInt") -} - -// AllReduceInt reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduceInt(op Op, dest, orig []int) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.INT64, op.ToC(), cm.comm), "AllReduceInt") -} - -// GatherInt gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherInt(toProc int, dest, orig []int) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.INT64, recvbuf, C.int(len(orig)), C.INT64, C.int(toProc), cm.comm), "GatherInt") -} - -// AllGatherInt gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherInt(dest, orig []int) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.INT64, recvbuf, C.int(len(orig)), C.INT64, cm.comm), "GatherInt") -} - -// ScatterInt scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterInt(fmProc int, dest, orig []int) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.INT64, recvbuf, C.int(len(dest)), C.INT64, C.int(fmProc), cm.comm), "GatherInt") -} - -// SendI64 sends values to toProc, using given unique tag identifier. -// This is Blocking. 
Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendI64(toProc int, tag int, vals []int64) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.INT64, C.int(toProc), C.int(tag), cm.comm), "SendI64") -} - -// Recv64I64 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvI64(fmProc int, tag int, vals []int64) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.INT64, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "RecvI64") -} - -// BcastI64 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastI64(fmProc int, vals []int64) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.INT64, C.int(fmProc), cm.comm), "BcastI64") -} - -// ReduceI64 reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceI64(toProc int, op Op, dest, orig []int64) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.INT64, op.ToC(), C.int(toProc), cm.comm), "ReduceI64") -} - -// AllReduceI64 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduceI64(op Op, dest, orig []int64) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.INT64, op.ToC(), cm.comm), "AllReduceI64") -} - -// GatherI64 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherI64(toProc int, dest, orig []int64) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.INT64, recvbuf, C.int(len(orig)), C.INT64, C.int(toProc), cm.comm), "GatherI64") -} - -// AllGatherI64 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherI64(dest, orig []int64) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.INT64, recvbuf, C.int(len(orig)), C.INT64, cm.comm), "GatherI64") -} - -// ScatterI64 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. 
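The Gather and Scatter size contract is easy to get backwards: the root-side buffer is always np times the per-proc buffer. A sketch of a round trip that scatters chunks from the root, works on them locally, and gathers the results back (float64 is used here, but every generated type follows the same pattern):

// scatterWorkGather distributes chunk values to each proc from the root,
// doubles them locally, and collects the results back on the root.
func scatterWorkGather(cm *mpi.Comm, chunk int) error {
	np := cm.Size()
	local := make([]float64, chunk)

	var all []float64
	if cm.Rank() == mpi.Root {
		all = make([]float64, np*chunk) // full data set, only needed on the root
	}
	if err := cm.ScatterF64(mpi.Root, local, all); err != nil {
		return err
	}
	for i := range local {
		local[i] *= 2 // local work on this proc's chunk
	}
	var gathered []float64
	if cm.Rank() == mpi.Root {
		gathered = make([]float64, np*chunk) // np * len(local), only on the root
	}
	return cm.GatherF64(mpi.Root, gathered, local)
}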
-// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterI64(fmProc int, dest, orig []int64) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.INT64, recvbuf, C.int(len(dest)), C.INT64, C.int(fmProc), cm.comm), "GatherI64") -} - -// SendU64 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendU64(toProc int, tag int, vals []uint64) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.UINT64, C.int(toProc), C.int(tag), cm.comm), "SendU64") -} - -// Recv64U64 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvU64(fmProc int, tag int, vals []uint64) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.UINT64, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "RecvU64") -} - -// BcastU64 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastU64(fmProc int, vals []uint64) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.UINT64, C.int(fmProc), cm.comm), "BcastU64") -} - -// ReduceU64 reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceU64(toProc int, op Op, dest, orig []uint64) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.UINT64, op.ToC(), C.int(toProc), cm.comm), "ReduceU64") -} - -// AllReduceU64 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduceU64(op Op, dest, orig []uint64) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.UINT64, op.ToC(), cm.comm), "AllReduceU64") -} - -// GatherU64 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherU64(toProc int, dest, orig []uint64) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.UINT64, recvbuf, C.int(len(orig)), C.UINT64, C.int(toProc), cm.comm), "GatherU64") -} - -// AllGatherU64 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). 
-// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherU64(dest, orig []uint64) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.UINT64, recvbuf, C.int(len(orig)), C.UINT64, cm.comm), "GatherU64") -} - -// ScatterU64 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterU64(fmProc int, dest, orig []uint64) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.UINT64, recvbuf, C.int(len(dest)), C.UINT64, C.int(fmProc), cm.comm), "GatherU64") -} - -// SendI32 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendI32(toProc int, tag int, vals []int32) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.INT32, C.int(toProc), C.int(tag), cm.comm), "SendI32") -} - -// Recv64I32 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvI32(fmProc int, tag int, vals []int32) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.INT32, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "RecvI32") -} - -// BcastI32 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastI32(fmProc int, vals []int32) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.INT32, C.int(fmProc), cm.comm), "BcastI32") -} - -// ReduceI32 reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceI32(toProc int, op Op, dest, orig []int32) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.INT32, op.ToC(), C.int(toProc), cm.comm), "ReduceI32") -} - -// AllReduceI32 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduceI32(op Op, dest, orig []int32) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.INT32, op.ToC(), cm.comm), "AllReduceI32") -} - -// GatherI32 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. 
-func (cm *Comm) GatherI32(toProc int, dest, orig []int32) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.INT32, recvbuf, C.int(len(orig)), C.INT32, C.int(toProc), cm.comm), "GatherI32") -} - -// AllGatherI32 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherI32(dest, orig []int32) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.INT32, recvbuf, C.int(len(orig)), C.INT32, cm.comm), "GatherI32") -} - -// ScatterI32 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterI32(fmProc int, dest, orig []int32) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.INT32, recvbuf, C.int(len(dest)), C.INT32, C.int(fmProc), cm.comm), "GatherI32") -} - -// SendU32 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendU32(toProc int, tag int, vals []uint32) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.UINT32, C.int(toProc), C.int(tag), cm.comm), "SendU32") -} - -// Recv64U32 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvU32(fmProc int, tag int, vals []uint32) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.UINT32, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "RecvU32") -} - -// BcastU32 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastU32(fmProc int, vals []uint32) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.UINT32, C.int(fmProc), cm.comm), "BcastU32") -} - -// ReduceU32 reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceU32(toProc int, op Op, dest, orig []uint32) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.UINT32, op.ToC(), C.int(toProc), cm.comm), "ReduceU32") -} - -// AllReduceU32 reduces all values across procs to all procs from orig into dest using given operation. 
-// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduceU32(op Op, dest, orig []uint32) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.UINT32, op.ToC(), cm.comm), "AllReduceU32") -} - -// GatherU32 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherU32(toProc int, dest, orig []uint32) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.UINT32, recvbuf, C.int(len(orig)), C.UINT32, C.int(toProc), cm.comm), "GatherU32") -} - -// AllGatherU32 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherU32(dest, orig []uint32) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.UINT32, recvbuf, C.int(len(orig)), C.UINT32, cm.comm), "GatherU32") -} - -// ScatterU32 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterU32(fmProc int, dest, orig []uint32) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.UINT32, recvbuf, C.int(len(dest)), C.UINT32, C.int(fmProc), cm.comm), "GatherU32") -} - -// SendI16 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendI16(toProc int, tag int, vals []int16) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.INT16, C.int(toProc), C.int(tag), cm.comm), "SendI16") -} - -// Recv64I16 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvI16(fmProc int, tag int, vals []int16) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.INT16, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "RecvI16") -} - -// BcastI16 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastI16(fmProc int, vals []int16) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.INT16, C.int(fmProc), cm.comm), "BcastI16") -} - -// ReduceI16 reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. 
-// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceI16(toProc int, op Op, dest, orig []int16) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.INT16, op.ToC(), C.int(toProc), cm.comm), "ReduceI16") -} - -// AllReduceI16 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduceI16(op Op, dest, orig []int16) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.INT16, op.ToC(), cm.comm), "AllReduceI16") -} - -// GatherI16 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherI16(toProc int, dest, orig []int16) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.INT16, recvbuf, C.int(len(orig)), C.INT16, C.int(toProc), cm.comm), "GatherI16") -} - -// AllGatherI16 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherI16(dest, orig []int16) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.INT16, recvbuf, C.int(len(orig)), C.INT16, cm.comm), "GatherI16") -} - -// ScatterI16 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterI16(fmProc int, dest, orig []int16) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.INT16, recvbuf, C.int(len(dest)), C.INT16, C.int(fmProc), cm.comm), "GatherI16") -} - -// SendU16 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendU16(toProc int, tag int, vals []uint16) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.UINT16, C.int(toProc), C.int(tag), cm.comm), "SendU16") -} - -// Recv64U16 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvU16(fmProc int, tag int, vals []uint16) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.UINT16, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "RecvU16") -} - -// BcastU16 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. 
-func (cm *Comm) BcastU16(fmProc int, vals []uint16) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.UINT16, C.int(fmProc), cm.comm), "BcastU16") -} - -// ReduceU16 reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceU16(toProc int, op Op, dest, orig []uint16) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.UINT16, op.ToC(), C.int(toProc), cm.comm), "ReduceU16") -} - -// AllReduceU16 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduceU16(op Op, dest, orig []uint16) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.UINT16, op.ToC(), cm.comm), "AllReduceU16") -} - -// GatherU16 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherU16(toProc int, dest, orig []uint16) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.UINT16, recvbuf, C.int(len(orig)), C.UINT16, C.int(toProc), cm.comm), "GatherU16") -} - -// AllGatherU16 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherU16(dest, orig []uint16) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.UINT16, recvbuf, C.int(len(orig)), C.UINT16, cm.comm), "GatherU16") -} - -// ScatterU16 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterU16(fmProc int, dest, orig []uint16) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.UINT16, recvbuf, C.int(len(dest)), C.UINT16, C.int(fmProc), cm.comm), "GatherU16") -} - -// SendI8 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendI8(toProc int, tag int, vals []int8) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.BYTE, C.int(toProc), C.int(tag), cm.comm), "SendI8") -} - -// Recv64I8 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. 
Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvI8(fmProc int, tag int, vals []int8) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.BYTE, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "RecvI8") -} - -// BcastI8 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastI8(fmProc int, vals []int8) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.BYTE, C.int(fmProc), cm.comm), "BcastI8") -} - -// ReduceI8 reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceI8(toProc int, op Op, dest, orig []int8) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.BYTE, op.ToC(), C.int(toProc), cm.comm), "ReduceI8") -} - -// AllReduceI8 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduceI8(op Op, dest, orig []int8) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.BYTE, op.ToC(), cm.comm), "AllReduceI8") -} - -// GatherI8 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherI8(toProc int, dest, orig []int8) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.BYTE, recvbuf, C.int(len(orig)), C.BYTE, C.int(toProc), cm.comm), "GatherI8") -} - -// AllGatherI8 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherI8(dest, orig []int8) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.BYTE, recvbuf, C.int(len(orig)), C.BYTE, cm.comm), "GatherI8") -} - -// ScatterI8 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterI8(fmProc int, dest, orig []int8) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.BYTE, recvbuf, C.int(len(dest)), C.BYTE, C.int(fmProc), cm.comm), "GatherI8") -} - -// SendU8 sends values to toProc, using given unique tag identifier. -// This is Blocking. 
Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendU8(toProc int, tag int, vals []uint8) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.BYTE, C.int(toProc), C.int(tag), cm.comm), "SendU8") -} - -// Recv64U8 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvU8(fmProc int, tag int, vals []uint8) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.BYTE, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "RecvU8") -} - -// BcastU8 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastU8(fmProc int, vals []uint8) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.BYTE, C.int(fmProc), cm.comm), "BcastU8") -} - -// ReduceU8 reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceU8(toProc int, op Op, dest, orig []uint8) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.BYTE, op.ToC(), C.int(toProc), cm.comm), "ReduceU8") -} - -// AllReduceU8 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduceU8(op Op, dest, orig []uint8) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.BYTE, op.ToC(), cm.comm), "AllReduceU8") -} - -// GatherU8 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherU8(toProc int, dest, orig []uint8) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.BYTE, recvbuf, C.int(len(orig)), C.BYTE, C.int(toProc), cm.comm), "GatherU8") -} - -// AllGatherU8 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherU8(dest, orig []uint8) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.BYTE, recvbuf, C.int(len(orig)), C.BYTE, cm.comm), "GatherU8") -} - -// ScatterU8 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. 
-// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterU8(fmProc int, dest, orig []uint8) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.BYTE, recvbuf, C.int(len(dest)), C.BYTE, C.int(fmProc), cm.comm), "GatherU8") -} - -// SendC128 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendC128(toProc int, tag int, vals []complex128) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.COMPLEX128, C.int(toProc), C.int(tag), cm.comm), "SendC128") -} - -// Recv64C128 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvC128(fmProc int, tag int, vals []complex128) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.COMPLEX128, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "RecvC128") -} - -// BcastC128 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastC128(fmProc int, vals []complex128) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.COMPLEX128, C.int(fmProc), cm.comm), "BcastC128") -} - -// ReduceC128 reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceC128(toProc int, op Op, dest, orig []complex128) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.COMPLEX128, op.ToC(), C.int(toProc), cm.comm), "ReduceC128") -} - -// AllReduceC128 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduceC128(op Op, dest, orig []complex128) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.COMPLEX128, op.ToC(), cm.comm), "AllReduceC128") -} - -// GatherC128 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) GatherC128(toProc int, dest, orig []complex128) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.COMPLEX128, recvbuf, C.int(len(orig)), C.COMPLEX128, C.int(toProc), cm.comm), "GatherC128") -} - -// AllGatherC128 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). 
-// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherC128(dest, orig []complex128) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.COMPLEX128, recvbuf, C.int(len(orig)), C.COMPLEX128, cm.comm), "GatherC128") -} - -// ScatterC128 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterC128(fmProc int, dest, orig []complex128) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.COMPLEX128, recvbuf, C.int(len(dest)), C.COMPLEX128, C.int(fmProc), cm.comm), "GatherC128") -} - -// SendC64 sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) SendC64(toProc int, tag int, vals []complex64) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.COMPLEX64, C.int(toProc), C.int(tag), cm.comm), "SendC64") -} - -// Recv64C64 receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) RecvC64(fmProc int, tag int, vals []complex64) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.COMPLEX64, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "RecvC64") -} - -// BcastC64 broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. -func (cm *Comm) BcastC64(fmProc int, vals []complex64) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.COMPLEX64, C.int(fmProc), cm.comm), "BcastC64") -} - -// ReduceC64 reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ReduceC64(toProc int, op Op, dest, orig []complex64) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.COMPLEX64, op.ToC(), C.int(toProc), cm.comm), "ReduceC64") -} - -// AllReduceC64 reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduceC64(op Op, dest, orig []complex64) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.COMPLEX64, op.ToC(), cm.comm), "AllReduceC64") -} - -// GatherC64 gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. 
-func (cm *Comm) GatherC64(toProc int, dest, orig []complex64) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.COMPLEX64, recvbuf, C.int(len(orig)), C.COMPLEX64, C.int(toProc), cm.comm), "GatherC64") -} - -// AllGatherC64 gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGatherC64(dest, orig []complex64) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.COMPLEX64, recvbuf, C.int(len(orig)), C.COMPLEX64, cm.comm), "GatherC64") -} - -// ScatterC64 scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) ScatterC64(fmProc int, dest, orig []complex64) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.COMPLEX64, recvbuf, C.int(len(dest)), C.COMPLEX64, C.int(fmProc), cm.comm), "GatherC64") -} diff --git a/base/mpi/numeric.gen.go.tmpl b/base/mpi/numeric.gen.go.tmpl deleted file mode 100644 index 79c01e3da0..0000000000 --- a/base/mpi/numeric.gen.go.tmpl +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build mpi || mpich - -package mpi - -/* -#cgo mpi pkg-config: ompi -#cgo mpich pkg-config: mpich -#include "mpi.h" - -MPI_Datatype FLOAT64 = MPI_DOUBLE; -MPI_Datatype FLOAT32 = MPI_FLOAT; -MPI_Datatype INT64 = MPI_LONG; -MPI_Datatype UINT64 = MPI_UNSIGNED_LONG; -MPI_Datatype INT32 = MPI_INT; -MPI_Datatype UINT32 = MPI_UNSIGNED; -MPI_Datatype INT16 = MPI_SHORT; -MPI_Datatype UINT16 = MPI_UNSIGNED_SHORT; -MPI_Datatype BYTE = MPI_BYTE; -MPI_Datatype COMPLEX128 = MPI_DOUBLE_COMPLEX; -MPI_Datatype COMPLEX64 = MPI_COMPLEX; -MPI_Status* StIgnore = MPI_STATUS_IGNORE; -*/ -import "C" - -import ( - "fmt" - "log" - "unsafe" -) - -{{range .In}} - -// Send{{.Name}} sends values to toProc, using given unique tag identifier. -// This is Blocking. Must have a corresponding Recv call with same tag on toProc, from this proc -func (cm *Comm) Send{{.Name}}(toProc int, tag int, vals []{{or .Type}}) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Send(buf, C.int(len(vals)), C.{{or .CType}}, C.int(toProc), C.int(tag), cm.comm), "Send{{.Name}}") -} - -// Recv64{{.Name}} receives values from proc fmProc, using given unique tag identifier -// This is Blocking. Must have a corresponding Send call with same tag on fmProc, to this proc -func (cm *Comm) Recv{{.Name}}(fmProc int, tag int, vals []{{or .Type}}) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Recv(buf, C.int(len(vals)), C.{{or .CType}}, C.int(fmProc), C.int(tag), cm.comm, C.StIgnore), "Recv{{.Name}}") -} - -// Bcast{{.Name}} broadcasts slice from fmProc to all other procs. -// All nodes have the same vals after this call, copied from fmProc. 
-func (cm *Comm) Bcast{{.Name}}(fmProc int, vals []{{or .Type}}) error { - buf := unsafe.Pointer(&vals[0]) - return Error(C.MPI_Bcast(buf, C.int(len(vals)), C.{{or .CType}}, C.int(fmProc), cm.comm), "Bcast{{.Name}}") -} - -// Reduce{{.Name}} reduces all values across procs to toProc in orig to dest using given operation. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) Reduce{{.Name}}(toProc int, op Op, dest, orig []{{or .Type}}) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Reduce(sendbuf, recvbuf, C.int(len(dest)), C.{{or .CType}}, op.ToC(), C.int(toProc), cm.comm), "Reduce{{.Name}}") -} - -// AllReduce{{.Name}} reduces all values across procs to all procs from orig into dest using given operation. -// IMPORTANT: orig and dest must be different slices -// To do an in-place operation, set orig to nil -func (cm *Comm) AllReduce{{.Name}}(op Op, dest, orig []{{or .Type}}) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } else { - sendbuf = C.MPI_IN_PLACE - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allreduce(sendbuf, recvbuf, C.int(len(dest)), C.{{or .CType}}, op.ToC(), cm.comm), "AllReduce{{.Name}}") -} - -// Gather{{.Name}} gathers values from all procs into toProc proc, tiled into dest of size np * len(orig). -// This is inverse of Scatter. -// recvbuf is ignored on all procs except toProc. -// IMPORTANT: orig and dest must be different slices. -func (cm *Comm) Gather{{.Name}}(toProc int, dest, orig []{{or .Type}}) error { - sendbuf := unsafe.Pointer(&orig[0]) - var recvbuf unsafe.Pointer - if dest != nil { - recvbuf = unsafe.Pointer(&dest[0]) - } - return Error(C.MPI_Gather(sendbuf, C.int(len(orig)), C.{{or .CType}}, recvbuf, C.int(len(orig)), C.{{or .CType}}, C.int(toProc), cm.comm), "Gather{{.Name}}") -} - -// AllGather{{.Name}} gathers values from all procs into all procs, -// tiled by proc into dest of size np * len(orig). -// IMPORTANT: orig and dest must be different slices -func (cm *Comm) AllGather{{.Name}}(dest, orig []{{or .Type}}) error { - sendbuf := unsafe.Pointer(&orig[0]) - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Allgather(sendbuf, C.int(len(orig)), C.{{or .CType}}, recvbuf, C.int(len(orig)), C.{{or .CType}}, cm.comm), "Gather{{.Name}}") -} - -// Scatter{{.Name}} scatters values from fmProc to all procs, distributing len(dest) size chunks to -// each proc from orig slice, which must be of size np * len(dest). This is inverse of Gather. -// sendbuf is ignored on all procs except fmProc. 
-// IMPORTANT: orig and dest must be different slices -func (cm *Comm) Scatter{{.Name}}(fmProc int, dest, orig []{{or .Type}}) error { - var sendbuf unsafe.Pointer - if orig != nil { - sendbuf = unsafe.Pointer(&orig[0]) - } - recvbuf := unsafe.Pointer(&dest[0]) - return Error(C.MPI_Scatter(sendbuf, C.int(len(dest)), C.{{or .CType}}, recvbuf, C.int(len(dest)), C.{{or .CType}}, C.int(fmProc), cm.comm), "Gather{{.Name}}") -} - - -{{- end}} - diff --git a/base/mpi/numeric.tmpldata b/base/mpi/numeric.tmpldata deleted file mode 100644 index 8d2ab5ee03..0000000000 --- a/base/mpi/numeric.tmpldata +++ /dev/null @@ -1,94 +0,0 @@ -[ - { - "Name": "F64", - "Type": "float64", - "CType": "FLOAT64", - "Default": "0", - "Size": "8" - }, - { - "Name": "F32", - "Type": "float32", - "CType": "FLOAT32", - "Default": "0", - "Size": "4" - }, - { - "Name": "Int", - "Type": "int", - "CType": "INT64", - "Default": "0", - "Size": "8" - }, - { - "Name": "I64", - "Type": "int64", - "CType": "INT64", - "Default": "0", - "Size": "8" - }, - { - "Name": "U64", - "Type": "uint64", - "CType": "UINT64", - "Default": "0", - "Size": "8" - }, - { - "Name": "I32", - "Type": "int32", - "CType": "INT32", - "Default": "0", - "Size": "4" - }, - { - "Name": "U32", - "Type": "uint32", - "CType": "UINT32", - "Default": "0", - "Size": "4" - }, - { - "Name": "I16", - "Type": "int16", - "CType": "INT16", - "Default": "0", - "Size": "2" - }, - { - "Name": "U16", - "Type": "uint16", - "CType": "UINT16", - "Default": "0", - "Size": "2" - }, - { - "Name": "I8", - "Type": "int8", - "CType": "BYTE", - "Default": "0", - "Size": "1" - }, - { - "Name": "U8", - "Type": "uint8", - "CType": "BYTE", - "Default": "0", - "Size": "1" - }, - { - "Name": "C128", - "Type": "complex128", - "CType": "COMPLEX128", - "Default": "0", - "Size": "16" - }, - { - "Name": "C64", - "Type": "complex64", - "CType": "COMPLEX64", - "Default": "0", - "Size": "8" - } -] - diff --git a/base/mpi/printf.go b/base/mpi/printf.go deleted file mode 100644 index e990c010be..0000000000 --- a/base/mpi/printf.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mpi - -import "fmt" - -// PrintAllProcs causes mpi.Printf to print on all processors -- otherwise just 0 -var PrintAllProcs = false - -// Printf does fmt.Printf only on the 0 rank node (see also AllPrintf to do all) -// and PrintAllProcs var to override for debugging, and print all -func Printf(fs string, pars ...any) { - if !PrintAllProcs && WorldRank() > 0 { - return - } - if WorldRank() > 0 { - AllPrintf(fs, pars...) - } else { - fmt.Printf(fs, pars...) - } -} - -// AllPrintf does fmt.Printf on all nodes, with node rank printed first -// This is best for debugging MPI itself. -func AllPrintf(fs string, pars ...any) { - fs = fmt.Sprintf("P%d: ", WorldRank()) + fs - fmt.Printf(fs, pars...) -} - -// Println does fmt.Println only on the 0 rank node (see also AllPrintln to do all) -// and PrintAllProcs var to override for debugging, and print all -func Println(fs ...any) { - if !PrintAllProcs && WorldRank() > 0 { - return - } - if WorldRank() > 0 { - AllPrintln(fs...) - } else { - fmt.Println(fs...) - } -} - -// AllPrintln does fmt.Println on all nodes, with node rank printed first -// This is best for debugging MPI itself. -func AllPrintln(fs ...any) { - fsa := make([]any, len(fs)) - copy(fsa[1:], fs) - fsa[0] = fmt.Sprintf("P%d: ", WorldRank()) - fmt.Println(fsa...) 
-} diff --git a/base/randx/README.md b/base/randx/README.md deleted file mode 100644 index 2ac982dd23..0000000000 --- a/base/randx/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# randx - -Package randx provides randomization functionality built on top of standard `math/rand` -random number generation functions. Includes: -* RandParams: specifies parameters for random number generation according to various distributions used e.g., for initializing random weights and generating random noise in neurons -* Permute*: basic convenience methods calling rand.Shuffle on e.g., []int slice - -Here are the distributions and how the parameters in `RandParams` map onto distributional parameters -- the `Mean` and `Var` are not the actual mean and variance of the distribution, but rather provide parameters roughly corresponding to these values, along with the extra `Par` value: - -```Go - // Binomial represents number of 1's in n (Par) random (Bernouli) trials of probability p (Var) - Binomial - - // Poisson represents number of events in interval, with event rate (lambda = Var) plus Mean - Poisson - - // Gamma represents maximum entropy distribution with two parameters: scaling parameter (Var) - // and shape parameter k (Par) plus Mean - Gamma - - // Gaussian normal with Var = stddev plus Mean - Gaussian - - // Beta with Var = alpha and Par = beta shape parameters - Beta - - // Mean is just the constant Mean, no randomness - Mean -``` - -See [distplot](distplot) for a program to plot the histograms of these different distributions as you vary the parameters. - diff --git a/base/randx/bool.go b/base/randx/bool.go deleted file mode 100644 index dd5040e82b..0000000000 --- a/base/randx/bool.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2019, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package randx - -// BoolP is a simple method to generate a true value with given probability -// (else false). It is just rand.Float64() < p but this is more readable -// and explicit. -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func BoolP(p float64, randOpt ...Rand) bool { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - return rnd.Float64() < p -} - -// BoolP32 is a simple method to generate a true value with given probability -// (else false). It is just rand.Float32() < p but this is more readable -// and explicit. -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func BoolP32(p float32, randOpt ...Rand) bool { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - return rnd.Float32() < p -} diff --git a/base/randx/dists.go b/base/randx/dists.go deleted file mode 100644 index e185e7dd32..0000000000 --- a/base/randx/dists.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright (c) 2023, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package randx - -import ( - "math" -) - -// note: this file contains random distribution functions -// from gonum.org/v1/gonum/stat/distuv -// which we modified only to use the randx.Rand interface. -// BinomialGen returns binomial with n trials (par) each of probability p (var) -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. 
-func BinomialGen(n, p float64, randOpt ...Rand) float64 { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - // NUMERICAL RECIPES IN C: THE ART OF SCIENTIFIC COMPUTING (ISBN 0-521-43108-5) - // p. 295-6 - // http://www.aip.de/groups/soe/local/numres/bookcpdf/c7-3.pdf - - porg := p - - if p > 0.5 { - p = 1 - p - } - am := n * p - - if n < 25 { - // Use direct method. - bnl := 0.0 - for i := 0; i < int(n); i++ { - if rnd.Float64() < p { - bnl++ - } - } - if p != porg { - return n - bnl - } - return bnl - } - - if am < 1 { - // Use rejection method with Poisson proposal. - const logM = 2.6e-2 // constant for rejection sampling (https://en.wikipedia.org/wiki/Rejection_sampling) - var bnl float64 - z := -p - pclog := (1 + 0.5*z) * z / (1 + (1+1.0/6*z)*z) // PadĆ© approximant of log(1 + x) - for { - bnl = 0.0 - t := 0.0 - for i := 0; i < int(n); i++ { - t += rnd.ExpFloat64() - if t >= am { - break - } - bnl++ - } - bnlc := n - bnl - z = -bnl / n - log1p := (1 + 0.5*z) * z / (1 + (1+1.0/6*z)*z) - t = (bnlc+0.5)*log1p + bnl - bnlc*pclog + 1/(12*bnlc) - am + logM // Uses Stirling's expansion of log(n!) - if rnd.ExpFloat64() >= t { - break - } - } - if p != porg { - return n - bnl - } - return bnl - } - // Original algorithm samples from a Poisson distribution with the - // appropriate expected value. However, the Poisson approximation is - // asymptotic such that the absolute deviation in probability is O(1/n). - // Rejection sampling produces exact variates with at worst less than 3% - // rejection with miminal additional computation. - - // Use rejection method with Cauchy proposal. - g, _ := math.Lgamma(n + 1) - plog := math.Log(p) - pclog := math.Log1p(-p) - sq := math.Sqrt(2 * am * (1 - p)) - for { - var em, y float64 - for { - y = math.Tan(math.Pi * rnd.Float64()) - em = sq*y + am - if em >= 0 && em < n+1 { - break - } - } - em = math.Floor(em) - lg1, _ := math.Lgamma(em + 1) - lg2, _ := math.Lgamma(n - em + 1) - t := 1.2 * sq * (1 + y*y) * math.Exp(g-lg1-lg2+em*plog+(n-em)*pclog) - if rnd.Float64() <= t { - if p != porg { - return n - em - } - return em - } - } -} - -// PoissonGen returns poisson variable, as number of events in interval, -// with event rate (lmb = Var) plus mean -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func PoissonGen(lambda float64, randOpt ...Rand) float64 { - // NUMERICAL RECIPES IN C: THE ART OF SCIENTIFIC COMPUTING (ISBN 0-521-43108-5) - // p. 294 - // - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - if lambda < 10.0 { - // Use direct method. - var em float64 - t := 0.0 - for { - t += rnd.ExpFloat64() - if t >= lambda { - break - } - em++ - } - return em - } - // Generate using: - // W. Hƶrmann. "The transformed rejection method for generating Poisson - // random variables." Insurance: Mathematics and Economics - // 12.1 (1993): 39-45. 
- - b := 0.931 + 2.53*math.Sqrt(lambda) - a := -0.059 + 0.02483*b - invalpha := 1.1239 + 1.1328/(b-3.4) - vr := 0.9277 - 3.6224/(b-2) - for { - U := rnd.Float64() - 0.5 - V := rnd.Float64() - us := 0.5 - math.Abs(U) - k := math.Floor((2*a/us+b)*U + lambda + 0.43) - if us >= 0.07 && V <= vr { - return k - } - if k <= 0 || (us < 0.013 && V > us) { - continue - } - lg, _ := math.Lgamma(k + 1) - if math.Log(V*invalpha/(a/(us*us)+b)) <= k*math.Log(lambda)-lambda-lg { - return k - } - } -} - -// GammaGen represents maximum entropy distribution with two parameters: -// a shape parameter (Alpha, Par in RandParams), -// and a scaling parameter (Beta, Var in RandParams). -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func GammaGen(alpha, beta float64, randOpt ...Rand) float64 { - const ( - // The 0.2 threshold is from https://www4.stat.ncsu.edu/~rmartin/Codes/rgamss.R - // described in detail in https://arxiv.org/abs/1302.1884. - smallAlphaThresh = 0.2 - ) - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - if beta <= 0 { - panic("GammaGen: beta <= 0") - } - - a := alpha - b := beta - switch { - case a <= 0: - panic("gamma: alpha <= 0") - case a == 1: - // Generate from exponential - return rnd.ExpFloat64() / b - case a < smallAlphaThresh: - // Generate using - // Liu, Chuanhai, Martin, Ryan and Syring, Nick. "Simulating from a - // gamma distribution with small shape parameter" - // https://arxiv.org/abs/1302.1884 - // use this reference: http://link.springer.com/article/10.1007/s00180-016-0692-0 - - // Algorithm adjusted to work in log space as much as possible. - lambda := 1/a - 1 - lr := -math.Log1p(1 / lambda / math.E) - for { - e := rnd.ExpFloat64() - var z float64 - if e >= -lr { - z = e + lr - } else { - z = -rnd.ExpFloat64() / lambda - } - eza := math.Exp(-z / a) - lh := -z - eza - var lEta float64 - if z >= 0 { - lEta = -z - } else { - lEta = -1 + lambda*z - } - if lh-lEta > -rnd.ExpFloat64() { - return eza / b - } - } - case a >= smallAlphaThresh: - // Generate using: - // Marsaglia, George, and Wai Wan Tsang. "A simple method for generating - // gamma variables." ACM Transactions on Mathematical Software (TOMS) - // 26.3 (2000): 363-372. - d := a - 1.0/3 - m := 1.0 - if a < 1 { - d += 1.0 - m = math.Pow(rnd.Float64(), 1/a) - } - c := 1 / (3 * math.Sqrt(d)) - for { - x := rnd.NormFloat64() - v := 1 + x*c - if v <= 0.0 { - continue - } - v = v * v * v - u := rnd.Float64() - if u < 1.0-0.0331*(x*x)*(x*x) { - return m * d * v / b - } - if math.Log(u) < 0.5*x*x+d*(1-v+math.Log(v)) { - return m * d * v / b - } - } - } - panic("unreachable") -} - -// GaussianGen returns gaussian (normal) random number with given -// mean and sigma standard deviation. -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func GaussianGen(mean, sigma float64, randOpt ...Rand) float64 { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - return mean + sigma*rnd.NormFloat64() -} - -// BetaGen returns beta random number with two shape parameters -// alpha > 0 and beta > 0 -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. 
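
// Minimal sketch of drawing from the generators above with a dedicated, seeded
// source so results are reproducible (the parameter values here are illustrative only):
func exampleDistributions() {
	rnd := NewSysRand(42) // separate rand.Rand source with a fixed seed

	g := GaussianGen(0.5, 0.25, rnd) // mean 0.5, stddev 0.25
	p := PoissonGen(10, rnd)         // event rate lambda = 10
	gm := GammaGen(0.5, 0.8, rnd)    // alpha, beta as documented on GammaGen above
	b := BetaGen(0.5, 0.8, rnd)      // alpha, beta shape parameters

	_ = []float64{g, p, gm, b}
}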
-func BetaGen(alpha, beta float64, randOpt ...Rand) float64 { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - ga := GammaGen(alpha, 1, rnd) - gb := GammaGen(beta, 1, rnd) - - return ga / (ga + gb) -} diff --git a/base/randx/dists_test.go b/base/randx/dists_test.go deleted file mode 100644 index 059dd031d8..0000000000 --- a/base/randx/dists_test.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (c) 2023, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package randx - -import ( - "math" - "testing" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/tensor/stats/stats" - "cogentcore.org/core/tensor/table" -) - -func TestGaussianGen(t *testing.T) { - nsamp := int(1e6) - dt := &table.Table{} - dt.AddFloat32Column("Val") - dt.SetNumRows(nsamp) - - mean := 0.5 - sig := 0.25 - tol := 1e-2 - - for i := 0; i < nsamp; i++ { - vl := GaussianGen(mean, sig) - dt.SetFloat("Val", i, vl) - } - ix := table.NewIndexView(dt) - desc := stats.DescAll(ix) - - meanRow := errors.Log1(desc.RowsByString("Stat", "Mean", table.Equals, table.UseCase))[0] - stdRow := errors.Log1(desc.RowsByString("Stat", "Std", table.Equals, table.UseCase))[0] - // minRow := errors.Log1(desc.RowsByString("Stat", "Min", table.Equals, table.UseCase))[0] - // maxRow := errors.Log1(desc.RowsByString("Stat", "Max", table.Equals, table.UseCase))[0] - - actMean := desc.Float("Val", meanRow) - actStd := desc.Float("Val", stdRow) - - if math.Abs(actMean-mean) > tol { - t.Errorf("Gaussian: mean %g\t out of tolerance vs target: %g\n", actMean, mean) - } - if math.Abs(actStd-sig) > tol { - t.Errorf("Gaussian: stdev %g\t out of tolerance vs target: %g\n", actStd, sig) - } - // b := bytes.NewBuffer(nil) - // desc.WriteCSV(b, table.Tab, table.Headers) - // fmt.Printf("%s\n", string(b.Bytes())) -} - -func TestBinomialGen(t *testing.T) { - nsamp := int(1e6) - dt := &table.Table{} - dt.AddFloat32Column("Val") - dt.SetNumRows(nsamp) - - n := 1.0 - p := 0.5 - tol := 1e-2 - - for i := 0; i < nsamp; i++ { - vl := BinomialGen(n, p) - dt.SetFloat("Val", i, vl) - } - ix := table.NewIndexView(dt) - desc := stats.DescAll(ix) - - meanRow := errors.Log1(desc.RowsByString("Stat", "Mean", table.Equals, table.UseCase))[0] - stdRow := errors.Log1(desc.RowsByString("Stat", "Std", table.Equals, table.UseCase))[0] - minRow := errors.Log1(desc.RowsByString("Stat", "Min", table.Equals, table.UseCase))[0] - maxRow := errors.Log1(desc.RowsByString("Stat", "Max", table.Equals, table.UseCase))[0] - - actMean := desc.Float("Val", meanRow) - actStd := desc.Float("Val", stdRow) - actMin := desc.Float("Val", minRow) - actMax := desc.Float("Val", maxRow) - - mean := n * p - if math.Abs(actMean-mean) > tol { - t.Errorf("Binomial: mean %g\t out of tolerance vs target: %g\n", actMean, mean) - } - sig := math.Sqrt(n * p * (1.0 - p)) - if math.Abs(actStd-sig) > tol { - t.Errorf("Binomial: stdev %g\t out of tolerance vs target: %g\n", actStd, sig) - } - if actMin < 0 { - t.Errorf("Binomial: min %g\t should not be < 0\n", actMin) - } - if actMax < 0 { - t.Errorf("Binomial: max %g\t should not be > 1\n", actMax) - } - // b := bytes.NewBuffer(nil) - // desc.WriteCSV(b, table.Tab, table.Headers) - // fmt.Printf("%s\n", string(b.Bytes())) -} - -func TestPoissonGen(t *testing.T) { - nsamp := int(1e6) - dt := &table.Table{} - dt.AddFloat32Column("Val") - dt.SetNumRows(nsamp) - - lambda := 10.0 - tol := 1e-2 - - for i := 0; i < nsamp; i++ { - 
vl := PoissonGen(lambda) - dt.SetFloat("Val", i, vl) - } - ix := table.NewIndexView(dt) - desc := stats.DescAll(ix) - - meanRow := errors.Log1(desc.RowsByString("Stat", "Mean", table.Equals, table.UseCase))[0] - stdRow := errors.Log1(desc.RowsByString("Stat", "Std", table.Equals, table.UseCase))[0] - minRow := errors.Log1(desc.RowsByString("Stat", "Min", table.Equals, table.UseCase))[0] - // maxRow := errors.Log1(desc.RowsByString("Stat", "Max", table.Equals, table.UseCase))[0] - - actMean := desc.Float("Val", meanRow) - actStd := desc.Float("Val", stdRow) - actMin := desc.Float("Val", minRow) - // actMax := desc.Float("Val", maxRow) - - mean := lambda - if math.Abs(actMean-mean) > tol { - t.Errorf("Poisson: mean %g\t out of tolerance vs target: %g\n", actMean, mean) - } - sig := math.Sqrt(lambda) - if math.Abs(actStd-sig) > tol { - t.Errorf("Poisson: stdev %g\t out of tolerance vs target: %g\n", actStd, sig) - } - if actMin < 0 { - t.Errorf("Poisson: min %g\t should not be < 0\n", actMin) - } - // if actMax < 0 { - // t.Errorf("Poisson: max %g\t should not be > 1\n", actMax) - // } - // b := bytes.NewBuffer(nil) - // desc.WriteCSV(b, table.Tab, table.Headers) - // fmt.Printf("%s\n", string(b.Bytes())) -} - -func TestGammaGen(t *testing.T) { - nsamp := int(1e6) - dt := &table.Table{} - dt.AddFloat32Column("Val") - dt.SetNumRows(nsamp) - - alpha := 0.5 - beta := 0.8 - tol := 1e-2 - - for i := 0; i < nsamp; i++ { - vl := GammaGen(alpha, beta) - dt.SetFloat("Val", i, vl) - } - ix := table.NewIndexView(dt) - desc := stats.DescAll(ix) - - meanRow := errors.Log1(desc.RowsByString("Stat", "Mean", table.Equals, table.UseCase))[0] - stdRow := errors.Log1(desc.RowsByString("Stat", "Std", table.Equals, table.UseCase))[0] - - actMean := desc.Float("Val", meanRow) - actStd := desc.Float("Val", stdRow) - - mean := alpha / beta - if math.Abs(actMean-mean) > tol { - t.Errorf("Gamma: mean %g\t out of tolerance vs target: %g\n", actMean, mean) - } - sig := math.Sqrt(alpha / beta / beta) - if math.Abs(actStd-sig) > tol { - t.Errorf("Gamma: stdev %g\t out of tolerance vs target: %g\n", actStd, sig) - } - // b := bytes.NewBuffer(nil) - // desc.WriteCSV(b, table.Tab, table.Headers) - // fmt.Printf("%s\n", string(b.Bytes())) -} - -func TestBetaGen(t *testing.T) { - nsamp := int(1e6) - dt := &table.Table{} - dt.AddFloat32Column("Val") - dt.SetNumRows(nsamp) - - alpha := 0.5 - beta := 0.8 - tol := 1e-2 - - for i := 0; i < nsamp; i++ { - vl := BetaGen(alpha, beta) - dt.SetFloat("Val", i, vl) - } - ix := table.NewIndexView(dt) - desc := stats.DescAll(ix) - - meanRow := errors.Log1(desc.RowsByString("Stat", "Mean", table.Equals, table.UseCase))[0] - stdRow := errors.Log1(desc.RowsByString("Stat", "Std", table.Equals, table.UseCase))[0] - - actMean := desc.Float("Val", meanRow) - actStd := desc.Float("Val", stdRow) - - mean := alpha / (alpha + beta) - if math.Abs(actMean-mean) > tol { - t.Errorf("Beta: mean %g\t out of tolerance vs target: %g\n", actMean, mean) - } - vr := alpha * beta / ((alpha + beta) * (alpha + beta) * (alpha + beta + 1)) - sig := math.Sqrt(vr) - if math.Abs(actStd-sig) > tol { - t.Errorf("Beta: stdev %g\t out of tolerance vs target: %g\n", actStd, sig) - } - // b := bytes.NewBuffer(nil) - // desc.WriteCSV(b, table.Tab, table.Headers) - // fmt.Printf("%s\n", string(b.Bytes())) -} diff --git a/base/randx/doc.go b/base/randx/doc.go deleted file mode 100644 index d99467a5e0..0000000000 --- a/base/randx/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2019, Cogent Core. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package randx provides randomization functionality built on top of standard math/rand -// random number generation functions. -// -// randx.Rand is an interface that enables calling the standard global rand functions, -// or a rand.Rand separate source, and is used for all methods in this package. -// Methods also take a thr thread arg to support a random generator that handles separate -// threads, such as gosl/slrand. -// -// randx.StdRand implements the interface. -// -// - RandParams: specifies parameters for random number generation according to various distributions, -// used e.g., for initializing random weights and generating random noise in neurons -// -// - Permute*: basic convenience methods calling rand.Shuffle on e.g., []int slice -// -// - BoolP: boolean for given probability -package randx diff --git a/base/randx/enumgen.go b/base/randx/enumgen.go deleted file mode 100644 index 4e0aa2c0b2..0000000000 --- a/base/randx/enumgen.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by "core generate -add-types"; DO NOT EDIT. - -package randx - -import ( - "cogentcore.org/core/enums" -) - -var _RandDistsValues = []RandDists{0, 1, 2, 3, 4, 5, 6} - -// RandDistsN is the highest valid value for type RandDists, plus one. -const RandDistsN RandDists = 7 - -var _RandDistsValueMap = map[string]RandDists{`Uniform`: 0, `Binomial`: 1, `Poisson`: 2, `Gamma`: 3, `Gaussian`: 4, `Beta`: 5, `Mean`: 6} - -var _RandDistsDescMap = map[RandDists]string{0: `Uniform has a uniform probability distribution over Var = range on either side of the Mean`, 1: `Binomial represents number of 1's in n (Par) random (Bernouli) trials of probability p (Var)`, 2: `Poisson represents number of events in interval, with event rate (lambda = Var) plus Mean`, 3: `Gamma represents maximum entropy distribution with two parameters: scaling parameter (Var) and shape parameter k (Par) plus Mean`, 4: `Gaussian normal with Var = stddev plus Mean`, 5: `Beta with Var = alpha and Par = beta shape parameters`, 6: `Mean is just the constant Mean, no randomness`} - -var _RandDistsMap = map[RandDists]string{0: `Uniform`, 1: `Binomial`, 2: `Poisson`, 3: `Gamma`, 4: `Gaussian`, 5: `Beta`, 6: `Mean`} - -// String returns the string representation of this RandDists value. -func (i RandDists) String() string { return enums.String(i, _RandDistsMap) } - -// SetString sets the RandDists value from its string representation, -// and returns an error if the string is invalid. -func (i *RandDists) SetString(s string) error { - return enums.SetString(i, s, _RandDistsValueMap, "RandDists") -} - -// Int64 returns the RandDists value as an int64. -func (i RandDists) Int64() int64 { return int64(i) } - -// SetInt64 sets the RandDists value from an int64. -func (i *RandDists) SetInt64(in int64) { *i = RandDists(in) } - -// Desc returns the description of the RandDists value. -func (i RandDists) Desc() string { return enums.Desc(i, _RandDistsDescMap) } - -// RandDistsValues returns all possible values for the type RandDists. -func RandDistsValues() []RandDists { return _RandDistsValues } - -// Values returns all possible values for the type RandDists. -func (i RandDists) Values() []enums.Enum { return enums.Values(_RandDistsValues) } - -// MarshalText implements the [encoding.TextMarshaler] interface. 
-func (i RandDists) MarshalText() ([]byte, error) { return []byte(i.String()), nil } - -// UnmarshalText implements the [encoding.TextUnmarshaler] interface. -func (i *RandDists) UnmarshalText(text []byte) error { - return enums.UnmarshalText(i, text, "RandDists") -} diff --git a/base/randx/pchoose.go b/base/randx/pchoose.go deleted file mode 100644 index 516743912f..0000000000 --- a/base/randx/pchoose.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2019, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package randx - -// PChoose32 chooses an index in given slice of float32's at random according -// to the probilities of each item (must be normalized to sum to 1). -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func PChoose32(ps []float32, randOpt ...Rand) int { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - pv := rnd.Float32() - sum := float32(0) - for i, p := range ps { - sum += p - if pv < sum { // note: lower values already excluded - return i - } - } - return len(ps) - 1 -} - -// PChoose64 chooses an index in given slice of float64's at random according -// to the probilities of each item (must be normalized to sum to 1) -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func PChoose64(ps []float64, randOpt ...Rand) int { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - pv := rnd.Float64() - sum := float64(0) - for i, p := range ps { - sum += p - if pv < sum { // note: lower values already excluded - return i - } - } - return len(ps) - 1 -} diff --git a/base/randx/permute.go b/base/randx/permute.go deleted file mode 100644 index 28a62053fd..0000000000 --- a/base/randx/permute.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) 2019, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package randx - -// SequentialInts initializes slice of ints to sequential start..start+N-1 -// numbers -- for cases where permuting the order is optional. -func SequentialInts(ins []int, start int) { - for i := range ins { - ins[i] = start + i - } -} - -// PermuteInts permutes (shuffles) the order of elements in the given int slice -// using the standard Fisher-Yates shuffle -// https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle -// So you don't have to remember how to call rand.Shuffle. -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func PermuteInts(ins []int, randOpt ...Rand) { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - rnd.Shuffle(len(ins), func(i, j int) { - ins[i], ins[j] = ins[j], ins[i] - }) -} - -// PermuteStrings permutes (shuffles) the order of elements in the given string slice -// using the standard Fisher-Yates shuffle -// https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle -// So you don't have to remember how to call rand.Shuffle -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. 
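
// Small sketch (not part of the deleted files) combining the permutation and
// probability helpers in this package, driven by one seeded source for reproducibility:
func examplePermute() {
	rnd := NewSysRand(1)

	order := make([]int, 10)
	SequentialInts(order, 0) // 0..9
	PermuteInts(order, rnd)  // Fisher-Yates shuffle in place

	coin := BoolP(0.25, rnd)                         // true with probability 0.25
	pick := PChoose32([]float32{0.2, 0.3, 0.5}, rnd) // index chosen by weight (weights sum to 1)

	_, _, _ = order, coin, pick
}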
-func PermuteStrings(ins []string, randOpt ...Rand) { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - rnd.Shuffle(len(ins), func(i, j int) { - ins[i], ins[j] = ins[j], ins[i] - }) -} - -// PermuteFloat32s permutes (shuffles) the order of elements in the given float32 slice -// using the standard Fisher-Yates shuffle -// https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle -// So you don't have to remember how to call rand.Shuffle -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func PermuteFloat32s(ins []float32, randOpt ...Rand) { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - rnd.Shuffle(len(ins), func(i, j int) { - ins[i], ins[j] = ins[j], ins[i] - }) -} - -// PermuteFloat64s permutes (shuffles) the order of elements in the given float64 slice -// using the standard Fisher-Yates shuffle -// https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle -// So you don't have to remember how to call rand.Shuffle -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func PermuteFloat64s(ins []float64, randOpt ...Rand) { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - rnd.Shuffle(len(ins), func(i, j int) { - ins[i], ins[j] = ins[j], ins[i] - }) -} diff --git a/base/randx/rand.go b/base/randx/rand.go deleted file mode 100644 index 560cae91d3..0000000000 --- a/base/randx/rand.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright (c) 2023, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package randx - -//go:generate core generate -add-types - -import "math/rand" - -// Rand provides an interface with most of the standard -// rand.Rand methods, to support the use of either the -// global rand generator or a separate Rand source. -type Rand interface { - // Seed uses the provided seed value to initialize the generator to a deterministic state. - // Seed should not be called concurrently with any other Rand method. - Seed(seed int64) - - // Int63 returns a non-negative pseudo-random 63-bit integer as an int64. - Int63() int64 - - // Uint32 returns a pseudo-random 32-bit value as a uint32. - Uint32() uint32 - - // Uint64 returns a pseudo-random 64-bit value as a uint64. - Uint64() uint64 - - // Int31 returns a non-negative pseudo-random 31-bit integer as an int32. - Int31() int32 - - // Int returns a non-negative pseudo-random int. - Int() int - - // Int63n returns, as an int64, a non-negative pseudo-random number in the half-open interval [0,n). - // It panics if n <= 0. - Int63n(n int64) int64 - - // Int31n returns, as an int32, a non-negative pseudo-random number in the half-open interval [0,n). - // It panics if n <= 0. - Int31n(n int32) int32 - - // Intn returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n). - // It panics if n <= 0. - Intn(n int) int - - // Float64 returns, as a float64, a pseudo-random number in the half-open interval [0.0,1.0). - Float64() float64 - - // Float32 returns, as a float32, a pseudo-random number in the half-open interval [0.0,1.0). - Float32() float32 - - // NormFloat64 returns a normally distributed float64 in the range - // [-math.MaxFloat64, +math.MaxFloat64] with - // standard normal distribution (mean = 0, stddev = 1) - // from the default Source. 
- // To produce a different normal distribution, callers can - // adjust the output using: - // - // sample = NormFloat64() * desiredStdDev + desiredMean - NormFloat64() float64 - - // ExpFloat64 returns an exponentially distributed float64 in the range - // (0, +math.MaxFloat64] with an exponential distribution whose rate parameter - // (lambda) is 1 and whose mean is 1/lambda (1) from the default Source. - // To produce a distribution with a different rate parameter, - // callers can adjust the output using: - // - // sample = ExpFloat64() / desiredRateParameter - ExpFloat64() float64 - - // Perm returns, as a slice of n ints, a pseudo-random permutation of the integers - // in the half-open interval [0,n). - Perm(n int) []int - - // Shuffle pseudo-randomizes the order of elements. - // n is the number of elements. Shuffle panics if n < 0. - // swap swaps the elements with indexes i and j. - Shuffle(n int, swap func(i, j int)) -} - -// SysRand supports the system random number generator -// for either a separate rand.Rand source, or, if that -// is nil, the global rand stream. -type SysRand struct { - - // if non-nil, use this random number source instead of the global default one - Rand *rand.Rand `display:"-"` -} - -// NewGlobalRand returns a new SysRand that implements the -// randx.Rand interface, with the system global rand source. -func NewGlobalRand() *SysRand { - r := &SysRand{} - return r -} - -// NewSysRand returns a new SysRand with a new -// rand.Rand random source with given initial seed. -func NewSysRand(seed int64) *SysRand { - r := &SysRand{} - r.NewRand(seed) - return r -} - -// NewRand sets Rand to a new rand.Rand source using given seed. -func (r *SysRand) NewRand(seed int64) { - r.Rand = rand.New(rand.NewSource(seed)) -} - -// Seed uses the provided seed value to initialize the generator to a deterministic state. -// Seed should not be called concurrently with any other Rand method. -func (r *SysRand) Seed(seed int64) { - if r.Rand == nil { - rand.Seed(seed) - return - } - r.Rand.Seed(seed) -} - -// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. -func (r *SysRand) Int63() int64 { - if r.Rand == nil { - return rand.Int63() - } - return r.Rand.Int63() -} - -// Uint32 returns a pseudo-random 32-bit value as a uint32. -func (r *SysRand) Uint32() uint32 { - if r.Rand == nil { - return rand.Uint32() - } - return r.Rand.Uint32() -} - -// Uint64 returns a pseudo-random 64-bit value as a uint64. -func (r *SysRand) Uint64() uint64 { - if r.Rand == nil { - return rand.Uint64() - } - return r.Rand.Uint64() -} - -// Int31 returns a non-negative pseudo-random 31-bit integer as an int32. -func (r *SysRand) Int31() int32 { - if r.Rand == nil { - return rand.Int31() - } - return r.Rand.Int31() -} - -// Int returns a non-negative pseudo-random int. -func (r *SysRand) Int() int { - if r.Rand == nil { - return rand.Int() - } - return r.Rand.Int() -} - -// Int63n returns, as an int64, a non-negative pseudo-random number in the half-open interval [0,n). -// It panics if n <= 0. -func (r *SysRand) Int63n(n int64) int64 { - if r.Rand == nil { - return rand.Int63n(n) - } - return r.Rand.Int63n(n) -} - -// Int31n returns, as an int32, a non-negative pseudo-random number in the half-open interval [0,n). -// It panics if n <= 0. -func (r *SysRand) Int31n(n int32) int32 { - if r.Rand == nil { - return rand.Int31n(n) - } - return r.Rand.Int31n(n) -} - -// Intn returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n). -// It panics if n <= 0. 
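
// Sketch of the pattern the Rand interface is designed for: code that needs
// randomness accepts Rand, so callers can pass either the shared global source or an
// isolated, seeded one. The scaling follows the NormFloat64 comment above
// (sample = NormFloat64() * desiredStdDev + desiredMean, with mean 0 here).
func addNoise(vals []float64, sigma float64, rnd Rand) {
	for i := range vals {
		vals[i] += sigma * rnd.NormFloat64()
	}
}

// Callers, for example:
//
//	addNoise(vals, 0.1, NewGlobalRand()) // shared global stream
//	addNoise(vals, 0.1, NewSysRand(7))   // private, reproducible stream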
-func (r *SysRand) Intn(n int) int { - if r.Rand == nil { - return rand.Intn(n) - } - return r.Rand.Intn(n) -} - -// Float64 returns, as a float64, a pseudo-random number in the half-open interval [0.0,1.0). -func (r *SysRand) Float64() float64 { - if r.Rand == nil { - return rand.Float64() - } - return r.Rand.Float64() -} - -// Float32 returns, as a float32, a pseudo-random number in the half-open interval [0.0,1.0). -func (r *SysRand) Float32() float32 { - if r.Rand == nil { - return rand.Float32() - } - return r.Rand.Float32() -} - -// NormFloat64 returns a normally distributed float64 in the range -// [-math.MaxFloat64, +math.MaxFloat64] with -// standard normal distribution (mean = 0, stddev = 1) -// from the default Source. -// To produce a different normal distribution, callers can -// adjust the output using: -// -// sample = NormFloat64() * desiredStdDev + desiredMean -func (r *SysRand) NormFloat64() float64 { - if r.Rand == nil { - return rand.NormFloat64() - } - return r.Rand.NormFloat64() -} - -// ExpFloat64 returns an exponentially distributed float64 in the range -// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter -// (lambda) is 1 and whose mean is 1/lambda (1) from the default Source. -// To produce a distribution with a different rate parameter, -// callers can adjust the output using: -// -// sample = ExpFloat64() / desiredRateParameter -func (r *SysRand) ExpFloat64() float64 { - if r.Rand == nil { - return rand.ExpFloat64() - } - return r.Rand.ExpFloat64() -} - -// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers -// in the half-open interval [0,n). -func (r *SysRand) Perm(n int) []int { - if r.Rand == nil { - return rand.Perm(n) - } - return r.Rand.Perm(n) -} - -// Shuffle pseudo-randomizes the order of elements. -// n is the number of elements. Shuffle panics if n < 0. -// swap swaps the elements with indexes i and j. -func (r *SysRand) Shuffle(n int, swap func(i, j int)) { - if r.Rand == nil { - rand.Shuffle(n, swap) - return - } - r.Rand.Shuffle(n, swap) -} diff --git a/base/randx/randx_test.go b/base/randx/randx_test.go deleted file mode 100644 index d18faabd5f..0000000000 --- a/base/randx/randx_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2019, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package randx - -import ( - "math" - "testing" - - "cogentcore.org/core/base/num" -) - -func TestPoisson(t *testing.T) { - // t.Skip() - vr := 8.0 - mi := 30 - rnd := NewGlobalRand() - pd := make([]int, mi) - for i := 0; i < 100000; i++ { - kv := int(PoissonGen(vr, rnd)) - // fmt.Printf("poisson: %d\n", kv) - if kv < mi { - pd[kv]++ - } - } - - ed := make([]int, 30) - li := 0 - ep := math.Exp(-vr) - p := 1.0 - for i := 0; i < 1000000; i++ { - p *= rnd.Float64() - if p <= ep { - d := i - li - if d < mi { - ed[d]++ - } - li = i - p = 1 - } - } - - mxi := 0 - mxe := 0 - im := 0 - em := 0 - for i := 0; i < mi; i++ { - v := pd[i] - if v > mxi { - mxi = v - im = i - } - v = ed[i] - if v > mxe { - mxe = v - em = i - } - } - // fmt.Printf("pd: %v\n", pd) - // fmt.Printf("max idx: %d\n", im) - // fmt.Printf("ed: %v\n", ed) - // fmt.Printf("max idx: %d\n", em) - if num.Abs(im-int(vr)) > 1 { - t.Errorf("mode != lambda: %d != %d (tol 1)\n", im, int(vr)) - } - if num.Abs(em-int(vr)) > 1 { - t.Errorf("empirical mode != lambda: %d != %d (tol 1)\n", em, int(vr)) - } -} diff --git a/base/randx/rndparams.go b/base/randx/rndparams.go deleted file mode 100644 index 7b22f524f3..0000000000 --- a/base/randx/rndparams.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright (c) 2019, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package randx - -// RandParams provides parameterized random number generation according to different distributions -// and variance, mean params -type RandParams struct { //git:add - - // distribution to generate random numbers from - Dist RandDists - - // mean of random distribution -- typically added to generated random variants - Mean float64 - - // variability parameter for the random numbers (gauss = standard deviation, not variance; uniform = half-range, others as noted in RandDists) - Var float64 - - // extra parameter for distribution (depends on each one) - Par float64 -} - -func (rp *RandParams) Defaults() { - rp.Var = 1 - rp.Par = 1 -} - -func (rp *RandParams) ShouldDisplay(field string) bool { - switch field { - case "Par": - return rp.Dist == Gamma || rp.Dist == Binomial || rp.Dist == Beta - } - return true -} - -// Gen generates a random variable according to current parameters. - -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. 
-func (rp *RandParams) Gen(randOpt ...Rand) float64 { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - switch rp.Dist { - case Uniform: - return UniformMeanRange(rp.Mean, rp.Var, rnd) - case Binomial: - return rp.Mean + BinomialGen(rp.Par, rp.Var, rnd) - case Poisson: - return rp.Mean + PoissonGen(rp.Var, rnd) - case Gamma: - return rp.Mean + GammaGen(rp.Par, rp.Var, rnd) - case Gaussian: - return GaussianGen(rp.Mean, rp.Var, rnd) - case Beta: - return rp.Mean + BetaGen(rp.Var, rp.Par, rnd) - } - return rp.Mean -} - -// RandDists are different random number distributions -type RandDists int32 //enums:enum - -// The random number distributions -const ( - // Uniform has a uniform probability distribution over Var = range on either side of the Mean - Uniform RandDists = iota - - // Binomial represents number of 1's in n (Par) random (Bernouli) trials of probability p (Var) - Binomial - - // Poisson represents number of events in interval, with event rate (lambda = Var) plus Mean - Poisson - - // Gamma represents maximum entropy distribution with two parameters: scaling parameter (Var) - // and shape parameter k (Par) plus Mean - Gamma - - // Gaussian normal with Var = stddev plus Mean - Gaussian - - // Beta with Var = alpha and Par = beta shape parameters - Beta - - // Mean is just the constant Mean, no randomness - Mean -) - -// IntZeroN returns uniform random integer in the range between 0 and n, exclusive of n: [0,n). -// Thr is an optional parallel thread index (-1 0 to ignore). -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func IntZeroN(n int64, randOpt ...Rand) int64 { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - return rnd.Int63n(n) -} - -// IntMinMax returns uniform random integer in range between min and max, exclusive of max: [min,max). - -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func IntMinMax(min, max int64, randOpt ...Rand) int64 { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - return min + rnd.Int63n(max-min) -} - -// IntMeanRange returns uniform random integer with given range on either side of the mean: -// [mean - range, mean + range] - -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func IntMeanRange(mean, rnge int64, randOpt ...Rand) int64 { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - return mean + (rnd.Int63n(2*rnge+1) - rnge) -} - -// ZeroOne returns a uniform random number between zero and one (exclusive of 1) - -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func ZeroOne(randOpt ...Rand) float64 { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - return rnd.Float64() -} - -// UniformMinMax returns uniform random number between min and max values inclusive -// (Do not use for generating integers - will not include max!) - -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. 
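
// Minimal sketch of parameterized generation via RandParams (field values are
// illustrative): Dist selects the distribution, and Mean, Var, Par map onto its
// parameters as documented on RandDists below.
func exampleRandParams() {
	rp := RandParams{Dist: Gaussian, Mean: 0.5, Var: 0.25} // Var = stddev for Gaussian
	noisy := rp.Gen() // uses the system global source when no Rand is passed

	rp = RandParams{Dist: Gamma, Var: 0.8, Par: 0.5} // Var = scaling, Par = shape k, per RandDists
	sample := rp.Gen(NewSysRand(3))

	_, _ = noisy, sample
}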
-func UniformMinMax(min, max float64, randOpt ...Rand) float64 { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - return min + (max-min)*rnd.Float64() -} - -// UniformMeanRange returns uniform random number with given range on either size of the mean: -// [mean - range, mean + range] - -// Optionally can pass a single Rand interface to use -- -// otherwise uses system global Rand source. -func UniformMeanRange(mean, rnge float64, randOpt ...Rand) float64 { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - return mean + rnge*2.0*(rnd.Float64()-0.5) -} diff --git a/base/randx/seeds.go b/base/randx/seeds.go deleted file mode 100644 index f45ba864a1..0000000000 --- a/base/randx/seeds.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2019, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package randx - -import ( - "time" -) - -// Seeds is a set of random seeds, typically used one per Run -type Seeds []int64 - -// Init allocates given number of seeds and initializes them to -// sequential numbers 1..n -func (rs *Seeds) Init(n int) { - *rs = make([]int64, n) - for i := range *rs { - (*rs)[i] = int64(i) + 1 - } -} - -// Set sets the given seed to either the single Rand -// interface passed, or the system global Rand source. -func (rs *Seeds) Set(idx int, randOpt ...Rand) { - var rnd Rand - if len(randOpt) == 0 { - rnd = NewGlobalRand() - } else { - rnd = randOpt[0] - } - rnd.Seed((*rs)[idx]) -} - -// NewSeeds sets a new set of random seeds based on current time -func (rs *Seeds) NewSeeds() { - rn := time.Now().UnixNano() - for i := range *rs { - (*rs)[i] = rn + int64(i) - } -} diff --git a/base/randx/typegen.go b/base/randx/typegen.go deleted file mode 100644 index 32ddeddbb7..0000000000 --- a/base/randx/typegen.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by "core generate -add-types"; DO NOT EDIT. 
- -package randx - -import ( - "cogentcore.org/core/types" -) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/base/randx.Rand", IDName: "rand", Doc: "Rand provides an interface with most of the standard\nrand.Rand methods, to support the use of either the\nglobal rand generator or a separate Rand source.", Methods: []types.Method{{Name: "Seed", Doc: "Seed uses the provided seed value to initialize the generator to a deterministic state.\nSeed should not be called concurrently with any other Rand method.", Args: []string{"seed"}}, {Name: "Int63", Doc: "Int63 returns a non-negative pseudo-random 63-bit integer as an int64.", Returns: []string{"int64"}}, {Name: "Uint32", Doc: "Uint32 returns a pseudo-random 32-bit value as a uint32.", Returns: []string{"uint32"}}, {Name: "Uint64", Doc: "Uint64 returns a pseudo-random 64-bit value as a uint64.", Returns: []string{"uint64"}}, {Name: "Int31", Doc: "Int31 returns a non-negative pseudo-random 31-bit integer as an int32.", Returns: []string{"int32"}}, {Name: "Int", Doc: "Int returns a non-negative pseudo-random int.", Returns: []string{"int"}}, {Name: "Int63n", Doc: "Int63n returns, as an int64, a non-negative pseudo-random number in the half-open interval [0,n).\nIt panics if n <= 0.", Args: []string{"n"}, Returns: []string{"int64"}}, {Name: "Int31n", Doc: "Int31n returns, as an int32, a non-negative pseudo-random number in the half-open interval [0,n).\nIt panics if n <= 0.", Args: []string{"n"}, Returns: []string{"int32"}}, {Name: "Intn", Doc: "Intn returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n).\nIt panics if n <= 0.", Args: []string{"n"}, Returns: []string{"int"}}, {Name: "Float64", Doc: "Float64 returns, as a float64, a pseudo-random number in the half-open interval [0.0,1.0).", Returns: []string{"float64"}}, {Name: "Float32", Doc: "Float32 returns, as a float32, a pseudo-random number in the half-open interval [0.0,1.0).", Returns: []string{"float32"}}, {Name: "NormFloat64", Doc: "NormFloat64 returns a normally distributed float64 in the range\n[-math.MaxFloat64, +math.MaxFloat64] with\nstandard normal distribution (mean = 0, stddev = 1)\nfrom the default Source.\nTo produce a different normal distribution, callers can\nadjust the output using:\n\n\tsample = NormFloat64() * desiredStdDev + desiredMean", Returns: []string{"float64"}}, {Name: "ExpFloat64", Doc: "ExpFloat64 returns an exponentially distributed float64 in the range\n(0, +math.MaxFloat64] with an exponential distribution whose rate parameter\n(lambda) is 1 and whose mean is 1/lambda (1) from the default Source.\nTo produce a distribution with a different rate parameter,\ncallers can adjust the output using:\n\n\tsample = ExpFloat64() / desiredRateParameter", Returns: []string{"float64"}}, {Name: "Perm", Doc: "Perm returns, as a slice of n ints, a pseudo-random permutation of the integers\nin the half-open interval [0,n).", Args: []string{"n"}, Returns: []string{"[]int"}}, {Name: "Shuffle", Doc: "Shuffle pseudo-randomizes the order of elements.\nn is the number of elements. 
Shuffle panics if n < 0.\nswap swaps the elements with indexes i and j.", Args: []string{"n", "swap"}}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/base/randx.SysRand", IDName: "sys-rand", Doc: "SysRand supports the system random number generator\nfor either a separate rand.Rand source, or, if that\nis nil, the global rand stream.", Fields: []types.Field{{Name: "Rand", Doc: "if non-nil, use this random number source instead of the global default one"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/base/randx.RandParams", IDName: "rand-params", Doc: "RandParams provides parameterized random number generation according to different distributions\nand variance, mean params", Directives: []types.Directive{{Tool: "git", Directive: "add"}}, Fields: []types.Field{{Name: "Dist", Doc: "distribution to generate random numbers from"}, {Name: "Mean", Doc: "mean of random distribution -- typically added to generated random variants"}, {Name: "Var", Doc: "variability parameter for the random numbers (gauss = standard deviation, not variance; uniform = half-range, others as noted in RandDists)"}, {Name: "Par", Doc: "extra parameter for distribution (depends on each one)"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/base/randx.RandDists", IDName: "rand-dists", Doc: "RandDists are different random number distributions"}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/base/randx.Seeds", IDName: "seeds", Doc: "Seeds is a set of random seeds, typically used one per Run"}) diff --git a/base/reflectx/pointers_test.go b/base/reflectx/pointers_test.go index 0238307a57..d4cff26ea0 100644 --- a/base/reflectx/pointers_test.go +++ b/base/reflectx/pointers_test.go @@ -244,14 +244,14 @@ func InitPointerTest() { pt.Mbr2 = 2 } -func FieldValue(obj any, fld reflect.StructField) reflect.Value { +func fieldValue(obj any, fld reflect.StructField) reflect.Value { ov := reflect.ValueOf(obj) f := unsafe.Pointer(ov.Pointer() + fld.Offset) nw := reflect.NewAt(fld.Type, f) return nw } -func SubFieldValue(obj any, fld reflect.StructField, sub reflect.StructField) reflect.Value { +func subFieldValue(obj any, fld reflect.StructField, sub reflect.StructField) reflect.Value { ov := reflect.ValueOf(obj) f := unsafe.Pointer(ov.Pointer() + fld.Offset + sub.Offset) nw := reflect.NewAt(sub.Type, f) @@ -263,7 +263,7 @@ func TestNewAt(t *testing.T) { InitPointerTest() typ := reflect.TypeOf(pt) fld, _ := typ.FieldByName("Mbr2") - vf := FieldValue(&pt, fld) + vf := fieldValue(&pt, fld) // fmt.Printf("Fld: %v Typ: %v vf: %v vfi: %v vfT: %v vfp: %v canaddr: %v canset: %v caninterface: %v\n", fld.Name, vf.Type().String(), vf.String(), vf.Interface(), vf.Interface(), vf.Interface(), vf.CanAddr(), vf.CanSet(), vf.CanInterface()) @@ -274,7 +274,7 @@ func TestNewAt(t *testing.T) { } fld, _ = typ.FieldByName("Mbr1") - vf = FieldValue(&pt, fld) + vf = fieldValue(&pt, fld) // fmt.Printf("Fld: %v Typ: %v vf: %v vfi: %v vfT: %v vfp: %v canaddr: %v canset: %v caninterface: %v\n", fld.Name, vf.Type().String(), vf.String(), vf.Interface(), vf.Interface(), vf.Interface(), vf.CanAddr(), vf.CanSet(), vf.CanInterface()) diff --git a/base/reflectx/structs.go b/base/reflectx/structs.go index e647b944bd..f7c4a486d6 100644 --- a/base/reflectx/structs.go +++ b/base/reflectx/structs.go @@ -229,3 +229,81 @@ func StructTags(tags reflect.StructTag) map[string]string { func StringJSON(v any) string { return string(errors.Log1(jsonx.WriteBytesIndent(v))) } + +// FieldByPath returns the 
[reflect.Value] of given field within given struct value, +// where the field can be a path with . separators, for fields within struct fields. +func FieldByPath(s reflect.Value, fieldPath string) (reflect.Value, error) { + sv := Underlying(s) + if sv.Kind() != reflect.Struct { + return reflect.Value{}, errors.New("reflectx.FieldByPath: kind is not struct") + } + fps := strings.Split(fieldPath, ".") + for _, fp := range fps { + fv := sv.FieldByName(fp) + if !fv.IsValid() { + return reflect.Value{}, errors.New("reflectx.FieldByPath: field name not found: " + fp) + } + sv = fv + } + return sv.Addr(), nil +} + +// CopyFields copies the named fields from src struct into dest struct. +// Fields can be paths with . separators for sub-fields of fields. +func CopyFields(dest, src any, fields ...string) error { + dsv := Underlying(reflect.ValueOf(dest)) + if dsv.Kind() != reflect.Struct { + return errors.New("reflectx.CopyFields: destination kind is not struct") + } + ssv := Underlying(reflect.ValueOf(src)) + if ssv.Kind() != reflect.Struct { + return errors.New("reflectx.CopyFields: source kind is not struct") + } + var errs []error + for _, f := range fields { + dfv, err := FieldByPath(dsv, f) + if err != nil { + errs = append(errs, err) + continue + } + sfv, err := FieldByPath(ssv, f) + if err != nil { + errs = append(errs, err) + continue + } + err = SetRobust(PointerValue(dfv).Interface(), sfv.Interface()) + if err != nil { + errs = append(errs, err) + continue + } + } + return errors.Join(errs...) +} + +// SetFieldsFromMap sets given map[string]any values to fields of given object, +// where the map keys are field paths (with . delimiters for sub-field paths). +// The value can be any appropriate type that applies to the given field. +// It prints a message if a parameter fails to be set, and returns an error. +func SetFieldsFromMap(obj any, vals map[string]any) error { + objv := reflect.ValueOf(obj) + npv := NonPointerValue(objv) + if npv.Kind() == reflect.Map { + err := CopyMapRobust(obj, vals) + if errors.Log(err) != nil { + return err + } + } + var errs []error + for k, v := range vals { + fld, err := FieldByPath(objv, k) + if err != nil { + errs = append(errs, err) + } + err = SetRobust(fld.Interface(), v) + if err != nil { + err = errors.Log(fmt.Errorf("SetFieldsFromMap: was not able to apply value: %v to field: %s", v, k)) + errs = append(errs, err) + } + } + return errors.Join(errs...) 
+} diff --git a/base/reflectx/structs_test.go b/base/reflectx/structs_test.go index e42b07f175..05a479c4d7 100644 --- a/base/reflectx/structs_test.go +++ b/base/reflectx/structs_test.go @@ -5,8 +5,12 @@ package reflectx import ( + "image" "reflect" "testing" + + "cogentcore.org/core/colors" + "github.com/stretchr/testify/assert" ) type person struct { @@ -53,3 +57,64 @@ func TestNonDefaultFields(t *testing.T) { t.Errorf("expected\n%v\n\tbut got\n%v", want, have) } } + +type imgfield struct { + Mycolor image.Image +} + +func TestCopyFields(t *testing.T) { + sp := &person{ + Name: "Go Gopher", + Age: 23, + ProgrammingLanguage: "Go", + FavoriteFruit: "Peach", + Data: "abcdef", + Pet: pet{ + Name: "Pet Gopher", + Type: "Dog", + Age: 7, + }, + } + dp := &person{} + CopyFields(dp, sp, "Name", "Pet.Age") + assert.Equal(t, sp.Name, dp.Name) + assert.Equal(t, sp.Pet.Age, dp.Pet.Age) + + sif := &imgfield{ + Mycolor: colors.Uniform(colors.Black), + } + dif := &imgfield{} + CopyFields(dif, sif, "Mycolor") + assert.Equal(t, sif.Mycolor, dif.Mycolor) +} + +func TestFieldByPath(t *testing.T) { + sp := &person{ + Name: "Go Gopher", + Age: 23, + ProgrammingLanguage: "Go", + FavoriteFruit: "Peach", + Data: "abcdef", + Pet: pet{ + Name: "Pet Gopher", + Type: "Dog", + Age: 7, + }, + } + spv := reflect.ValueOf(sp) + fv, err := FieldByPath(spv, "Pet.Age") + assert.NoError(t, err) + assert.Equal(t, 7, fv.Elem().Interface()) + fv, err = FieldByPath(spv, "Pet.Name") + assert.NoError(t, err) + assert.Equal(t, "Pet Gopher", fv.Elem().Interface()) + fv, err = FieldByPath(spv, "Pet.Ages") + assert.Error(t, err) + fv, err = FieldByPath(spv, "Pets.Age") + assert.Error(t, err) + + err = SetFieldsFromMap(sp, map[string]any{"Pet.Age": 8, "Data": "ddd"}) + assert.NoError(t, err) + assert.Equal(t, 8, sp.Pet.Age) + assert.Equal(t, "ddd", sp.Data) +} diff --git a/base/reflectx/values.go b/base/reflectx/values.go index ebadbbbef6..6c03b40921 100644 --- a/base/reflectx/values.go +++ b/base/reflectx/values.go @@ -47,6 +47,18 @@ func KindIsNumber(vk reflect.Kind) bool { return vk >= reflect.Int && vk <= reflect.Complex128 } +// KindIsInt returns whether the given [reflect.Kind] is an int +// type such as int, int32 etc. +func KindIsInt(vk reflect.Kind) bool { + return vk >= reflect.Int && vk <= reflect.Uintptr +} + +// KindIsFloat returns whether the given [reflect.Kind] is a +// float32 or float64. +func KindIsFloat(vk reflect.Kind) bool { + return vk >= reflect.Float32 && vk <= reflect.Float64 +} + // ToBool robustly converts to a bool any basic elemental type // (including pointers to such) using a big type switch organized // for greatest efficiency. It tries the [bools.Booler] @@ -941,7 +953,15 @@ func SetRobust(to, from any) error { rto := reflect.ValueOf(to) pto := UnderlyingPointer(rto) if IsNil(pto) { - return fmt.Errorf("got nil destination value") + // If the original value is a non-nil pointer, we can just use it + // even though the underlying pointer is nil (this happens when there + // is a pointer to a nil pointer; see #1365). + if !IsNil(rto) && rto.Kind() == reflect.Pointer { + pto = rto + } else { + // Otherwise, we cannot recover any meaningful value. 
+ return fmt.Errorf("got nil destination value") + } } pito := pto.Interface() @@ -951,6 +971,16 @@ func SetRobust(to, from any) error { return fmt.Errorf("destination value cannot be set; it must be a variable or field, not a const or tmp or other value that cannot be set (value: %v of type %T)", pto, pto) } + // images should not be copied by content: just set the pointer! + // otherwise the original images (especially colors!) are altered. + // TODO: #1394 notes the more general ambiguity about deep vs. shallow pointer copy. + if img, ok := to.(*image.Image); ok { + if fimg, ok := from.(image.Image); ok { + *img = fimg + return nil + } + } + // first we do the generic AssignableTo case if rto.Kind() == reflect.Pointer { fv := reflect.ValueOf(from) diff --git a/base/reflectx/values_test.go b/base/reflectx/values_test.go index 6a26f7bfdb..b14936b627 100644 --- a/base/reflectx/values_test.go +++ b/base/reflectx/values_test.go @@ -146,6 +146,11 @@ func TestPointerSetRobust(t *testing.T) { t.Errorf(err.Error()) } assert.Equal(t, aptr, bptr) + + aptr = nil // also must work if dest pointer is nil + err = SetRobust(&aptr, bptr) + assert.NoError(t, err) + assert.Equal(t, aptr, bptr) } func BenchmarkFloatToFloat(b *testing.B) { diff --git a/base/stringsx/stringsx.go b/base/stringsx/stringsx.go index 79dc0941ca..393d65b0d7 100644 --- a/base/stringsx/stringsx.go +++ b/base/stringsx/stringsx.go @@ -8,6 +8,7 @@ package stringsx import ( "bytes" + "slices" "strings" ) @@ -88,3 +89,18 @@ func InsertFirstUnique(strs *[]string, str string, max int) { (*strs)[0] = str } } + +// UniqueList removes duplicates from given string list, +// preserving the order. +func UniqueList(strs []string) []string { + n := len(strs) + for i := n - 1; i >= 0; i-- { + p := strs[i] + for j, s := range strs { + if p == s && i != j { + strs = slices.Delete(strs, i, i+1) + break + } + } + } + return strs +} diff --git a/cli/cli.go b/cli/cli.go index 992245faf4..11f71aa3b6 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -40,6 +40,9 @@ func Run[T any, C CmdOrFunc[T]](opts *Options, cfg T, cmds ...C) error { } return err } + if len(cmds) == 1 { // one command is always the root + cs[0].Root = true + } cmd, err := config(opts, cfg, cs...) if err != nil { if opts.Fatal { diff --git a/core/bars.go b/core/bars.go index afa2d491b6..3910d93a19 100644 --- a/core/bars.go +++ b/core/bars.go @@ -114,8 +114,7 @@ func (sc *Scene) addDefaultBars() { } } -////////////////////////////////////////////////////////////// -// Scene wrappers +//////// Scene wrappers // AddTopBar adds the given function for configuring a control bar // at the top of the window diff --git a/core/filepicker.go b/core/filepicker.go index f3c1406b0b..69a48f2a16 100644 --- a/core/filepicker.go +++ b/core/filepicker.go @@ -21,6 +21,7 @@ import ( "cogentcore.org/core/base/elide" "cogentcore.org/core/base/errors" "cogentcore.org/core/base/fileinfo" + "cogentcore.org/core/base/fsx" "cogentcore.org/core/colors" "cogentcore.org/core/cursors" "cogentcore.org/core/events" @@ -705,7 +706,7 @@ func (fp *FilePicker) editRecentPaths() { // Filename is used to specify an file path. // It results in a [FileButton] [Value]. -type Filename string +type Filename = fsx.Filename // FileButton represents a [Filename] value with a button // that opens a [FilePicker]. diff --git a/core/form.go b/core/form.go index 9785dbfd56..b15748ede3 100644 --- a/core/form.go +++ b/core/form.go @@ -30,6 +30,11 @@ type Form struct { // Inline is whether to display the form in one line.
Inline bool + // Modified optionally highlights and tracks fields that have been modified + // through an OnChange event. If present, it replaces the default value highlighting + // and resetting logic. Ignored if nil. + Modified map[string]bool + // structFields are the fields of the current struct. structFields []*structField @@ -179,13 +184,20 @@ func (fm *Form) Init() { // (see https://github.com/cogentcore/core/issues/1098). doc, _ := types.GetDoc(f.value, f.parent, f.field, label) w.SetTooltip(doc) - if hasDef { - w.SetTooltip("(Default: " + def + ") " + w.Tooltip) + if hasDef || fm.Modified != nil { + if hasDef { + w.SetTooltip("(Default: " + def + ") " + w.Tooltip) + } var isDef bool w.Styler(func(s *styles.Style) { f := fm.structFields[i] - isDef = reflectx.ValueIsDefault(f.value, def) dcr := "(Double click to reset to default) " + if fm.Modified != nil { + isDef = !fm.Modified[f.path] + dcr = "(Double click to mark as not modified) " + } else { + isDef = reflectx.ValueIsDefault(f.value, def) + } if !isDef { s.Color = colors.Scheme.Primary.Base s.Cursor = cursors.Poof @@ -202,13 +214,20 @@ func (fm *Form) Init() { return } e.SetHandled() - err := reflectx.SetFromDefaultTag(f.value, def) + var err error + if fm.Modified != nil { + fm.Modified[f.path] = false + } else { + err = reflectx.SetFromDefaultTag(f.value, def) + } if err != nil { ErrorSnackbar(w, err, "Error setting default value") } else { w.Update() valueWidget.AsWidget().Update() - valueWidget.AsWidget().SendChange(e) + if fm.Modified == nil { + valueWidget.AsWidget().SendChange(e) + } } }) } @@ -243,8 +262,11 @@ func (fm *Form) Init() { }) if !fm.IsReadOnly() && !readOnlyTag { wb.OnChange(func(e events.Event) { + if fm.Modified != nil { + fm.Modified[f.path] = true + } fm.SendChange(e) - if hasDef { + if hasDef || fm.Modified != nil { labelWidget.Update() } if fm.isShouldDisplayer { diff --git a/core/form_test.go b/core/form_test.go index 91942c95e8..74b5f0a985 100644 --- a/core/form_test.go +++ b/core/form_test.go @@ -7,6 +7,7 @@ package core import ( "testing" + "cogentcore.org/core/colors" "cogentcore.org/core/events" "cogentcore.org/core/styles" "cogentcore.org/core/styles/abilities" @@ -73,3 +74,29 @@ func TestFormStyle(t *testing.T) { NewForm(b).SetStruct(s) b.AssertRender(t, "form/style") } + +type giveUpParams struct { + ProbThr float32 + MinGiveUpSum float32 + Utility float32 + Timing float32 + Progress float32 + MinUtility float32 + ProgressRateTau float32 + ProgressRateDt float32 +} + +type addFields struct { + GiveUp giveUpParams `display:"add-fields"` +} + +func TestFormAddFields(t *testing.T) { + b := NewBody() + b.Styler(func(s *styles.Style) { + s.Min.X.Ch(100) + }) + NewForm(b).SetStruct(&addFields{}).Styler(func(s *styles.Style) { + s.Background = colors.Scheme.SurfaceContainerLow + }) + b.AssertRender(t, "form/addfields") +} diff --git a/core/layout.go b/core/layout.go index e8d938bd4a..9e3e6ecab9 100644 --- a/core/layout.go +++ b/core/layout.go @@ -140,8 +140,8 @@ type Layouter interface { SetScrollParams(d math32.Dims, sb *Slider) } -// AsFrame returns the given value as a value of type [Frame] if the type -// of the given value embeds [Frame], or nil otherwise. +// AsFrame returns the given value as a [Frame] if it implements [Layouter] +// or nil otherwise. func AsFrame(n tree.Node) *Frame { if t, ok := n.(Layouter); ok { return t.AsFrame() @@ -149,7 +149,6 @@ func AsFrame(n tree.Node) *Frame { return nil } -// AsFrame satisfies the [Layouter] interface. 
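Stepping back to the Form.Modified field and SetModified method added in core/form.go above, here is a hypothetical usage sketch (not part of this patch; the params type and body setup are illustrative only):

```Go
package main

import "cogentcore.org/core/core"

type params struct {
	Rate  float32
	Decay float32
}

func main() {
	b := core.NewBody("Modified tracking")
	p := &params{Rate: 0.1, Decay: 0.5}
	modified := map[string]bool{}
	// With Modified set, the form highlights fields that have received an
	// OnChange event (keyed by field path) instead of comparing values
	// against their default tags, and double click marks them unmodified.
	core.NewForm(b).SetStruct(p).SetModified(modified)
	b.RunMainWindow()
}
```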
func (t *Frame) AsFrame() *Frame { return t } diff --git a/core/list.go b/core/list.go index 707d9f7f5e..ca80541756 100644 --- a/core/list.go +++ b/core/list.go @@ -90,7 +90,7 @@ type Lister interface { // SliceIndex returns the logical slice index: si = i + StartIndex, // the actual value index vi into the slice value (typically = si), // which can be different if there is an index indirection as in - // tensorcore table.IndexView), and a bool that is true if the + // tensorcore.Table), and a bool that is true if the // index is beyond the available data and is thus invisible, // given the row index provided. SliceIndex(i int) (si, vi int, invis bool) @@ -817,7 +817,7 @@ func (lb *ListBase) MakeToolbar(p *tree.Plan) { }) } -//////////////////////////////////////////////////////////// +//////// // Row access methods // NOTE: row = physical GUI display row, idx = slice index // not the same! @@ -1062,8 +1062,7 @@ func (lb *ListBase) movePageUpEvent(selMode events.SelectModes) int { return nidx } -////////////////////////////////////////////////////////// -// Selection: user operates on the index labels +//////// Selection: user operates on the index labels // updateSelectRow updates the selection for the given row func (lb *ListBase) updateSelectRow(row int, selMode events.SelectModes) { @@ -1251,8 +1250,7 @@ func (lb *ListBase) unselectIndexEvent(idx int) { } } -/////////////////////////////////////////////////// -// Copy / Cut / Paste +//////// Copy / Cut / Paste // mimeDataIndex adds mimedata for given idx: an application/json of the struct func (lb *ListBase) mimeDataIndex(md *mimedata.Mimes, idx int) { @@ -1438,8 +1436,7 @@ func (lb *ListBase) duplicate() int { //types:add return pasteAt } -////////////////////////////////////////////////////////////////////////////// -// Drag-n-Drop +//////// Drag-n-Drop // selectRowIfNone selects the row the mouse is on if there // are no currently selected items. Returns false if no valid mouse row. diff --git a/core/render.go b/core/render.go index 5eec22c456..dff2d2bbaa 100644 --- a/core/render.go +++ b/core/render.go @@ -160,8 +160,7 @@ func (wb *WidgetBase) doNeedsRender() { }) } -////////////////////////////////////////////////////////////////// -// Scene +//////// Scene var sceneShowIters = 2 @@ -274,8 +273,7 @@ func (sc *Scene) contentSize(initSz image.Point) image.Point { return psz.ToPointFloor() } -////////////////////////////////////////////////////////////////// -// Widget local rendering +//////// Widget local rendering // PushBounds pushes our bounding box bounds onto the bounds stack // if they are non-empty. This automatically limits our drawing to diff --git a/core/renderwindow.go b/core/renderwindow.go index 0ba70945ea..d9b939ff6c 100644 --- a/core/renderwindow.go +++ b/core/renderwindow.go @@ -284,6 +284,8 @@ func (w *renderWindow) resized() { if DebugSettings.WindowEventTrace { fmt.Printf("Win: %v skipped same-size Resized: %v\n", w.name, curRg) } + rc.logicalDPI = w.logicalDPI() + w.mains.resize(rg) // no-op if everyone below is good // still need to apply style even if size is same for _, kv := range w.mains.stack.Order { st := kv.Value diff --git a/core/scene.go b/core/scene.go index 49fa2e6a0a..52a72359ce 100644 --- a/core/scene.go +++ b/core/scene.go @@ -210,6 +210,9 @@ func (sc *Scene) Init() { currentRenderWindow.SetStageTitle(st.Title) }) sc.Updater(func() { + if TheApp.Platform() == system.Offscreen { + return + } // At the scene level, we reset the shortcuts and add our context menu // shortcuts every time. 
This clears the way for buttons to add their // shortcuts in their own Updaters. We must get the shortcuts every time diff --git a/core/settings.go b/core/settings.go index f46726786b..ccd0db4450 100644 --- a/core/settings.go +++ b/core/settings.go @@ -645,8 +645,7 @@ type EditorSettings struct { //types:add DepthColor bool `default:"true"` } -////////////////////////////////////////////////////////////////// -// FavoritePaths +//////// FavoritePaths // favoritePathItem represents one item in a favorite path list, for display of // favorites. Is an ordered list instead of a map because user can organize @@ -696,8 +695,7 @@ var defaultPaths = favoritePaths{ {icons.Computer, "root", "/"}, } -////////////////////////////////////////////////////////////////// -// FilePaths +//////// FilePaths // FilePaths represents a set of file paths. type FilePaths []string @@ -746,8 +744,7 @@ func openRecentPaths() { } } -////////////////////////////////////////////////////////////////// -// DebugSettings +//////// DebugSettings // DebugSettings are the currently active debugging settings var DebugSettings = &DebugSettingsData{ diff --git a/core/tabs.go b/core/tabs.go index a29530e40a..eeb55cb2fa 100644 --- a/core/tabs.go +++ b/core/tabs.go @@ -20,6 +20,12 @@ import ( "cogentcore.org/core/tree" ) +// Tabber is an interface for getting the parent Tabs of tab buttons. +type Tabber interface { + // AsCoreTabs returns the underlying Tabs implementation. + AsCoreTabs() *Tabs +} + // Tabs divide widgets into logical groups and give users the ability // to freely navigate between them using tab buttons. type Tabs struct { @@ -101,6 +107,8 @@ func (tt TabTypes) isColumn() bool { return tt == NavigationDrawer } +func (ts *Tabs) AsCoreTabs() *Tabs { return ts } + func (ts *Tabs) Init() { ts.Frame.Init() ts.maxChars = 16 @@ -544,5 +552,8 @@ func (tb *Tab) Init() { // tabs returns the parent [Tabs] of this [Tab]. func (tb *Tab) tabs() *Tabs { - return tb.Parent.AsTree().Parent.(*Tabs) + if tbr, ok := tb.Parent.AsTree().Parent.(Tabber); ok { + return tbr.AsCoreTabs() + } + return nil } diff --git a/core/textfield.go b/core/textfield.go index c5f4c77486..525f721d28 100644 --- a/core/textfield.go +++ b/core/textfield.go @@ -585,8 +585,7 @@ func (tf *TextField) WidgetTooltip(pos image.Point) (string, image.Point) { return tf.error.Error(), tf.DefaultTooltipPos() } -////////////////////////////////////////////////////////////////////////////////////////// -// Cursor Navigation +//////// Cursor Navigation // cursorForward moves the cursor forward func (tf *TextField) cursorForward(steps int) { @@ -856,8 +855,7 @@ func (tf *TextField) cursorKill() { tf.cursorDelete(steps) } -/////////////////////////////////////////////////////////////////////////////// -// Selection +//////// Selection // clearSelected resets both the global selected flag and any current selection func (tf *TextField) clearSelected() { @@ -1098,8 +1096,7 @@ func (tf *TextField) contextMenu(m *Scene) { } } -/////////////////////////////////////////////////////////////////////////////// -// Undo +//////// Undo // textFieldUndoRecord holds one undo record type textFieldUndoRecord struct { @@ -1190,8 +1187,7 @@ func (tf *TextField) redo() { } } -/////////////////////////////////////////////////////////////////////////////// -// Complete +//////// Complete // SetCompleter sets completion functions so that completions will // automatically be offered as the user types. 
@@ -1241,8 +1237,7 @@ func (tf *TextField) completeText(s string) { tf.editDone() } -/////////////////////////////////////////////////////////////////////////////// -// Rendering +//////// Rendering // hasWordWrap returns true if the layout is multi-line word wrapping func (tf *TextField) hasWordWrap() bool { @@ -1461,7 +1456,7 @@ func (tf *TextField) autoScroll() { availSz := sz.Actual.Content.Sub(icsz) tf.configTextSize(availSz) n := len(tf.editText) - tf.cursorPos = math32.ClampInt(tf.cursorPos, 0, n) + tf.cursorPos = math32.Clamp(tf.cursorPos, 0, n) if tf.hasWordWrap() { // does not scroll tf.startPos = 0 diff --git a/core/tree.go b/core/tree.go index 5b06a3f728..88ad0cc07f 100644 --- a/core/tree.go +++ b/core/tree.go @@ -60,8 +60,8 @@ type Treer interface { //types:add DropDeleteSource(e events.Event) } -// AsTree returns the given value as a value of type [Tree] if the type -// of the given value embeds [Tree], or nil otherwise. +// AsTree returns the given value as a [Tree] if it has +// an AsCoreTree() method, or nil otherwise. func AsTree(n tree.Node) *Tree { if t, ok := n.(Treer); ok { return t.AsCoreTree() @@ -123,7 +123,7 @@ type Tree struct { // with each child tree node when it is initialized. It is only // called with the root node itself in [Tree.SetTreeInit], so you // should typically call that instead of setting this directly. - TreeInit func(tr *Tree) `set:"-"` + TreeInit func(tr *Tree) `set:"-" json:"-" xml:"-"` // Indent is the amount to indent children relative to this node. // It should be set in a Styler like all other style properties. @@ -151,8 +151,8 @@ type Tree struct { // our alloc includes all of our children, but we only draw us. widgetSize math32.Vector2 - // root is the cached root of the tree. It is automatically set. - root *Tree + // Root is the cached root of the tree. It is automatically set. + Root Treer `copier:"-" json:"-" xml:"-" edit:"-" set:"-"` // SelectedNodes holds the currently selected nodes. // It is only set on the root node. See [Tree.GetSelectedNodes] @@ -186,7 +186,9 @@ func (tr *Tree) rootSetViewIndex() int { tvn := AsTree(cw) if tvn != nil { tvn.viewIndex = idx - tvn.root = tr + if tvn.Root == nil { + tvn.Root = tr + } idx++ } return tree.Continue @@ -480,15 +482,18 @@ func (tr *Tree) OnAdd() { tr.WidgetBase.OnAdd() tr.Text = tr.Name if ptv := AsTree(tr.Parent); ptv != nil { - tr.root = ptv.root + tr.Root = ptv.Root tr.IconOpen = ptv.IconOpen tr.IconClosed = ptv.IconClosed tr.IconLeaf = ptv.IconLeaf } else { - tr.root = tr + if tr.Root == nil { + tr.Root = tr + } } - if tr.root.TreeInit != nil { - tr.root.TreeInit(tr) + troot := tr.Root.AsCoreTree() + if troot.TreeInit != nil { + troot.TreeInit(tr) } } @@ -507,10 +512,10 @@ func (tr *Tree) SetTreeInit(v func(tr *Tree)) *Tree { // which is what controls the functional inactivity of the tree // if individual nodes are ReadOnly that only affects display typically. 
func (tr *Tree) rootIsReadOnly() bool { - if tr.root == nil { + if tr.Root == nil { return true } - return tr.root.IsReadOnly() + return tr.Root.AsCoreTree().IsReadOnly() } func (tr *Tree) Style() { @@ -549,8 +554,8 @@ func (tr *Tree) SizeUp() { tr.widgetSize = tr.Geom.Size.Actual.Total h := tr.widgetSize.Y w := tr.widgetSize.X - if tr.root.This == tr.This { // do it every time on root - tr.root.rootSetViewIndex() + if tr.IsRoot() { // do it every time on root + tr.rootSetViewIndex() } if !tr.Closed { @@ -583,11 +588,11 @@ func (tr *Tree) SizeDown(iter int) bool { } func (tr *Tree) Position() { - rn := tr.root - if rn == nil { + if tr.Root == nil { slog.Error("core.Tree: RootView is nil", "in node:", tr) return } + rn := tr.Root.AsCoreTree() tr.setBranchState() sz := &tr.Geom.Size sz.Actual.Total.X = rn.Geom.Size.Actual.Total.X - (tr.Geom.Pos.Total.X - rn.Geom.Pos.Total.X) @@ -658,26 +663,26 @@ func (tr *Tree) RenderWidget() { } } -////////////////////////////////////////////////////////////////////////////// -// Selection +//////// Selection // GetSelectedNodes returns a slice of the currently selected // Trees within the entire tree, using a list maintained // by the root node. func (tr *Tree) GetSelectedNodes() []Treer { - if tr.root == nil { + if tr.Root == nil { return nil } - if len(tr.root.SelectedNodes) == 0 { - return tr.root.SelectedNodes + rn := tr.Root.AsCoreTree() + if len(rn.SelectedNodes) == 0 { + return rn.SelectedNodes } - return tr.root.SelectedNodes + return rn.SelectedNodes } // SetSelectedNodes updates the selected nodes on the root node to the given list. func (tr *Tree) SetSelectedNodes(sl []Treer) { - if tr.root != nil { - tr.root.SelectedNodes = sl + if tr.Root != nil { + tr.Root.AsCoreTree().SelectedNodes = sl } } @@ -743,7 +748,7 @@ func (tr *Tree) SelectAll() { return } tr.UnselectAll() - nn := tr.root + nn := tr.Root.AsCoreTree() nn.Select() for nn != nil { nn = nn.moveDown(events.SelectQuiet) @@ -830,26 +835,27 @@ func (tr *Tree) selectUpdate(mode events.SelectModes) bool { // sendSelectEvent sends an [events.Select] event on both this node and the root node. func (tr *Tree) sendSelectEvent(original ...events.Event) { - if tr.This != tr.root.This { + if !tr.IsRoot() { tr.Send(events.Select, original...) } - tr.root.Send(events.Select, original...) + tr.Root.AsCoreTree().Send(events.Select, original...) } // sendChangeEvent sends an [events.Change] event on both this node and the root node. func (tr *Tree) sendChangeEvent(original ...events.Event) { - if tr.This != tr.root.This { + if !tr.IsRoot() { tr.SendChange(original...) } - tr.root.SendChange(original...) + tr.Root.AsCoreTree().SendChange(original...) } // sendChangeEventReSync sends an [events.Change] event on the RootView node. // If SyncNode != nil, it also does a re-sync from root. func (tr *Tree) sendChangeEventReSync(original ...events.Event) { tr.sendChangeEvent(original...) - if tr.root.SyncNode != nil { - tr.root.Resync() + rn := tr.Root.AsCoreTree() + if rn.SyncNode != nil { + rn.Resync() } } @@ -873,8 +879,7 @@ func (tr *Tree) UnselectEvent() { } } -////////////////////////////////////////////////////////////////////////////// -// Moving +//////// Moving // moveDown moves the selection down to next element in the tree, // using given select mode (from keyboard modifiers). 
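The tree.go changes above expose the cached root as the exported Root field (a Treer interface value) in place of the old unexported root pointer. A minimal sketch of reaching the root from outside the package (not part of this patch; rootOf is a hypothetical helper):

```Go
package main

import (
	"fmt"

	"cogentcore.org/core/core"
)

// rootOf returns the root [core.Tree] of the given node, or nil if the
// cached Root has not been set yet (it is normally set in OnAdd).
func rootOf(tr *core.Tree) *core.Tree {
	if tr == nil || tr.Root == nil {
		return nil
	}
	return tr.Root.AsCoreTree()
}

func main() {
	b := core.NewBody("Tree root")
	parent := core.NewTree(b).SetText("root")
	child := core.NewTree(parent).SetText("child")
	fmt.Println(rootOf(child) == parent) // true: Root is inherited from the parent in OnAdd
}
```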
@@ -916,7 +921,7 @@ func (tr *Tree) moveDownSibling(selMode events.SelectModes) *Tree { if tr.Parent == nil { return nil } - if tr == tr.root { + if tr == tr.Root { return nil } myidx := tr.IndexInParent() @@ -936,7 +941,7 @@ func (tr *Tree) moveDownSibling(selMode events.SelectModes) *Tree { // using given select mode (from keyboard modifiers). // Returns newly selected node func (tr *Tree) moveUp(selMode events.SelectModes) *Tree { - if tr.Parent == nil || tr == tr.root { + if tr.Parent == nil || tr == tr.Root { return nil } myidx := tr.IndexInParent() @@ -1038,7 +1043,7 @@ func (tr *Tree) movePageDownEvent(selMode events.SelectModes) *Tree { // moveToLastChild moves to the last child under me, using given select mode // (from keyboard modifiers) func (tr *Tree) moveToLastChild(selMode events.SelectModes) *Tree { - if tr.Parent == nil || tr == tr.root { + if tr.Parent == nil || tr == tr.Root { return nil } if !tr.Closed && tr.HasChildren() { @@ -1054,11 +1059,12 @@ func (tr *Tree) moveToLastChild(selMode events.SelectModes) *Tree { // using given select mode (from keyboard modifiers) // and emits select event for newly selected item func (tr *Tree) moveHomeEvent(selMode events.SelectModes) *Tree { - tr.root.selectUpdate(selMode) - tr.root.SetFocusQuiet() - tr.root.ScrollToThis() - tr.root.sendSelectEvent() - return tr.root + rn := tr.Root.AsCoreTree() + rn.selectUpdate(selMode) + rn.SetFocusQuiet() + rn.ScrollToThis() + rn.sendSelectEvent() + return rn } // moveEndEvent moves the selection to the very last node in the tree, @@ -1195,8 +1201,7 @@ func (tr *Tree) OpenParents() { tr.NeedsLayout() } -///////////////////////////////////////////////////////////// -// Modifying Source Tree +//////// Modifying Source Tree func (tr *Tree) ContextMenuPos(e events.Event) (pos image.Point) { if e != nil { @@ -1249,18 +1254,17 @@ func (tr *Tree) contextMenu(m *Scene) { // IsRoot returns true if given node is the root of the tree, // creating an error snackbar if it is and action is non-empty. -func (tr *Tree) IsRoot(action string) bool { - if tr.This == tr.root.This { - if action != "" { - MessageSnackbar(tr, fmt.Sprintf("Cannot %v the root of the tree", action)) +func (tr *Tree) IsRoot(action ...string) bool { + if tr.This == tr.Root.AsCoreTree().This { + if len(action) > 0 { + MessageSnackbar(tr, fmt.Sprintf("Cannot %v the root of the tree", action[0])) } return true } return false } -//////////////////////////////////////////////////////////// -// Copy / Cut / Paste +//////// Copy / Cut / Paste // MimeData adds mimedata for this node: a text/plain of the Path. func (tr *Tree) MimeData(md *mimedata.Mimes) { @@ -1268,7 +1272,7 @@ func (tr *Tree) MimeData(md *mimedata.Mimes) { tr.mimeDataSync(md) return } - *md = append(*md, mimedata.NewTextData(tr.PathFrom(tr.root))) + *md = append(*md, mimedata.NewTextData(tr.PathFrom(tr.Root.AsCoreTree()))) var buf bytes.Buffer err := jsonx.Write(tr.This, &buf) if err == nil { @@ -1326,13 +1330,13 @@ func (tr *Tree) Cut() { //types:add } tr.Copy() sels := tr.GetSelectedNodes() - root := tr.root + rn := tr.Root.AsCoreTree() tr.UnselectAll() for _, sn := range sels { sn.AsTree().Delete() } - root.Update() - root.sendChangeEvent() + rn.Update() + rn.sendChangeEvent() } // Paste pastes clipboard at given node. 
@@ -1370,7 +1374,7 @@ func (tr *Tree) makePasteMenu(m *Scene, md mimedata.Mimes, fun func()) { fun() } }) - if !tr.IsRoot("") && tr.root.This != tr.This { + if !tr.IsRoot() { NewButton(m).SetText("Insert Before").OnClick(func(e events.Event) { tr.pasteBefore(md, events.DropCopy) if fun != nil { @@ -1459,10 +1463,10 @@ func (tr *Tree) pasteAt(md mimedata.Mimes, mod events.DropMods, rel int, actNm s parent.InsertChild(ns, myidx+i) nwb := AsWidget(ns) ntv := AsTree(ns) - ntv.root = tr.root + ntv.Root = tr.Root nwb.setScene(tr.Scene) nwb.Update() // incl children - npath := ns.AsTree().PathFrom(tr.root) + npath := ns.AsTree().PathFrom(tr.Root) if mod == events.DropMove && npath == orgpath { // we will be nuked immediately after drag ns.AsTree().SetName(ns.AsTree().Name + treeTempMovedTag) // special keyword :) } @@ -1490,7 +1494,7 @@ func (tr *Tree) pasteChildren(md mimedata.Mimes, mod events.DropMods) { tr.AddChild(ns) nwb := AsWidget(ns) ntv := AsTree(ns) - ntv.root = tr.root + ntv.Root = tr.Root nwb.setScene(tr.Scene) } tr.Update() @@ -1498,8 +1502,7 @@ func (tr *Tree) pasteChildren(md mimedata.Mimes, mod events.DropMods) { tr.sendChangeEvent() } -////////////////////////////////////////////////////////////////////////////// -// Drag-n-Drop +//////// Drag-n-Drop // dragStart starts a drag-n-drop on this node -- it includes any other // selected nodes as well, each as additional records in mimedata. @@ -1568,17 +1571,17 @@ func (tr *Tree) DropDeleteSource(e events.Event) { return } md := de.Data.(mimedata.Mimes) - root := tr.root + rn := tr.Root.AsCoreTree() for _, d := range md { if d.Type != fileinfo.TextPlain { // link continue } path := string(d.Data) - sn := root.FindPath(path) + sn := rn.FindPath(path) if sn != nil { sn.AsTree().Delete() } - sn = root.FindPath(path + treeTempMovedTag) + sn = rn.FindPath(path + treeTempMovedTag) if sn != nil { psplt := strings.Split(path, "/") orgnm := psplt[len(psplt)-1] diff --git a/core/treesync.go b/core/treesync.go index 1cda830ea7..6eebb22df1 100644 --- a/core/treesync.go +++ b/core/treesync.go @@ -62,13 +62,13 @@ func (tr *Tree) Resync() { func (tr *Tree) syncToSrc(tvIndex *int, init bool, depth int) { sn := tr.SyncNode // root must keep the same name for continuity with surrounding context - if tr != tr.root { + if tr != tr.Root { nm := "tv_" + sn.AsTree().Name tr.SetName(nm) } tr.viewIndex = *tvIndex *tvIndex++ - if init && depth >= tr.root.OpenDepth { + if init && depth >= tr.Root.AsCoreTree().OpenDepth { tr.SetClosed(true) } skids := sn.AsTree().Children @@ -377,7 +377,7 @@ func (tr *Tree) inspectNode() { //types:add // mimeDataSync adds mimedata for this node: a text/plain of the Path, // and an application/json of the sync node. 
func (tr *Tree) mimeDataSync(md *mimedata.Mimes) { - sroot := tr.root.SyncNode + sroot := tr.Root.AsCoreTree().SyncNode src := tr.SyncNode *md = append(*md, mimedata.NewTextData(src.AsTree().PathFrom(sroot))) var buf bytes.Buffer @@ -435,7 +435,7 @@ func (tr *Tree) pasteAtSync(md mimedata.Mimes, mod events.DropMods, rel int, act return } myidx += rel - sroot := tr.root.SyncNode + sroot := tr.Root.AsCoreTree().SyncNode sz := len(sl) var seln tree.Node for i, ns := range sl { @@ -488,7 +488,7 @@ func (tr *Tree) cutSync() { // dropDeleteSourceSync handles delete source event for DropMove case, for Sync func (tr *Tree) dropDeleteSourceSync(de *events.DragDrop) { md := de.Data.(mimedata.Mimes) - sroot := tr.root.SyncNode + sroot := tr.Root.AsCoreTree().SyncNode for _, d := range md { if d.Type != fileinfo.TextPlain { // link continue diff --git a/core/typegen.go b/core/typegen.go index 0d3a07e588..0ffc6c3b35 100644 --- a/core/typegen.go +++ b/core/typegen.go @@ -237,7 +237,7 @@ func (t *FileButton) SetFilename(v string) *FileButton { t.Filename = v; return // Extensions are the target file extensions for the file picker. func (t *FileButton) SetExtensions(v string) *FileButton { t.Extensions = v; return t } -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/core.Form", IDName: "form", Doc: "Form represents a struct with rows of field names and editable values.", Embeds: []types.Field{{Name: "Frame"}}, Fields: []types.Field{{Name: "Struct", Doc: "Struct is the pointer to the struct that we are viewing."}, {Name: "Inline", Doc: "Inline is whether to display the form in one line."}, {Name: "structFields", Doc: "structFields are the fields of the current struct."}, {Name: "isShouldDisplayer", Doc: "isShouldDisplayer is whether the struct implements [ShouldDisplayer], which results\nin additional updating being done at certain points."}}}) +var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/core.Form", IDName: "form", Doc: "Form represents a struct with rows of field names and editable values.", Embeds: []types.Field{{Name: "Frame"}}, Fields: []types.Field{{Name: "Struct", Doc: "Struct is the pointer to the struct that we are viewing."}, {Name: "Inline", Doc: "Inline is whether to display the form in one line."}, {Name: "Modified", Doc: "Modified optionally highlights and tracks fields that have been modified\nthrough an OnChange event. If present, it replaces the default value highlighting\nand resetting logic. Ignored if nil."}, {Name: "structFields", Doc: "structFields are the fields of the current struct."}, {Name: "isShouldDisplayer", Doc: "isShouldDisplayer is whether the struct implements [ShouldDisplayer], which results\nin additional updating being done at certain points."}}}) // NewForm returns a new [Form] with the given optional parent: // Form represents a struct with rows of field names and editable values. @@ -251,6 +251,12 @@ func (t *Form) SetStruct(v any) *Form { t.Struct = v; return t } // Inline is whether to display the form in one line. func (t *Form) SetInline(v bool) *Form { t.Inline = v; return t } +// SetModified sets the [Form.Modified]: +// Modified optionally highlights and tracks fields that have been modified +// through an OnChange event. If present, it replaces the default value highlighting +// and resetting logic. Ignored if nil. 
+func (t *Form) SetModified(v map[string]bool) *Form { t.Modified = v; return t } + var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/core.Frame", IDName: "frame", Doc: "Frame is the primary node type responsible for organizing the sizes\nand positions of child widgets. It also renders the standard box model.\nAll collections of widgets should generally be contained within a [Frame];\notherwise, the parent widget must take over responsibility for positioning.\nFrames automatically can add scrollbars depending on the [styles.Style.Overflow].\n\nFor a [styles.Grid] frame, the [styles.Style.Columns] property should\ngenerally be set to the desired number of columns, from which the number of rows\nis computed; otherwise, it uses the square root of number of\nelements.", Embeds: []types.Field{{Name: "WidgetBase"}}, Fields: []types.Field{{Name: "StackTop", Doc: "StackTop, for a [styles.Stacked] frame, is the index of the node to use\nas the top of the stack. Only the node at this index is rendered; if it is\nnot a valid index, nothing is rendered."}, {Name: "LayoutStackTopOnly", Doc: "LayoutStackTopOnly is whether to only layout the top widget\n(specified by [Frame.StackTop]) for a [styles.Stacked] frame.\nThis is appropriate for widgets such as [Tabs], which do a full\nredraw on stack changes, but not for widgets such as [Switch]es\nwhich don't."}, {Name: "layout", Doc: "layout contains implementation state info for doing layout"}, {Name: "HasScroll", Doc: "HasScroll is whether scrollbars exist for each dimension."}, {Name: "scrolls", Doc: "scrolls are the scroll bars, which are fully managed as needed."}, {Name: "focusName", Doc: "accumulated name to search for when keys are typed"}, {Name: "focusNameTime", Doc: "time of last focus name event; for timeout"}, {Name: "focusNameLast", Doc: "last element focused on; used as a starting point if name is the same"}}}) // NewFrame returns a new [Frame] with the given optional parent: @@ -1177,7 +1183,7 @@ func NewToolbar(parent ...tree.Node) *Toolbar { return tree.New[Toolbar](parent. 
var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/core.Treer", IDName: "treer", Doc: "Treer is an interface for [Tree] types\nproviding access to the base [Tree] and\noverridable method hooks for actions taken on the [Tree],\nincluding OnOpen, OnClose, etc.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Methods: []types.Method{{Name: "AsCoreTree", Doc: "AsTree returns the base [Tree] for this node.", Returns: []string{"Tree"}}, {Name: "CanOpen", Doc: "CanOpen returns true if the node is able to open.\nBy default it checks HasChildren(), but could check other properties\nto perform lazy building of the tree.", Returns: []string{"bool"}}, {Name: "OnOpen", Doc: "OnOpen is called when a node is opened.\nThe base version does nothing."}, {Name: "OnClose", Doc: "OnClose is called when a node is closed\nThe base version does nothing."}, {Name: "MimeData", Args: []string{"md"}}, {Name: "Cut"}, {Name: "Copy"}, {Name: "Paste"}, {Name: "DragDrop", Args: []string{"e"}}, {Name: "DropDeleteSource", Args: []string{"e"}}}}) -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/core.Tree", IDName: "tree", Doc: "Tree provides a graphical representation of a tree structure,\nproviding full navigation and manipulation abilities.\n\nIt does not handle layout by itself, so if you want it to scroll\nseparately from the rest of the surrounding context, you must\nplace it in a [Frame].\n\nIf the [Tree.SyncNode] field is non-nil, typically via the\n[Tree.SyncTree] method, then the Tree mirrors another\ntree structure, and tree editing functions apply to\nthe source tree first, and then to the Tree by sync.\n\nOtherwise, data can be directly encoded in a Tree\nderived type, to represent any kind of tree structure\nand associated data.\n\nStandard [events.Event]s are sent to any listeners, including\n[events.Select], [events.Change], and [events.DoubleClick].\nThe selected nodes are in the root [Tree.SelectedNodes] list.", Methods: []types.Method{{Name: "OpenAll", Doc: "OpenAll opens the node and all of its sub-nodes.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "CloseAll", Doc: "CloseAll closes the node and all of its sub-nodes.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "Copy", Doc: "Copy copies the tree to the clipboard.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "Cut", Doc: "Cut copies to [system.Clipboard] and deletes selected items.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "Paste", Doc: "Paste pastes clipboard at given node.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "InsertAfter", Doc: "InsertAfter inserts a new node in the tree\nafter this node, at the same (sibling) level,\nprompting for the type of node to insert.\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "InsertBefore", Doc: "InsertBefore inserts a new node in the tree\nbefore this node, at the same (sibling) level,\nprompting for the type of node to insert\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "AddChildNode", Doc: "AddChildNode adds a new child node to this one in the tree,\nprompting the user for the type of node to add\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "DeleteNode", Doc: "DeleteNode deletes the tree node or 
sync node corresponding\nto this view node in the sync tree.\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "Duplicate", Doc: "Duplicate duplicates the sync node corresponding to this view node in\nthe tree, and inserts the duplicate after this node (as a new sibling).\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "editNode", Doc: "editNode pulls up a [Form] dialog for the node.\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "inspectNode", Doc: "inspectNode pulls up a new Inspector window on the node.\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}}, Embeds: []types.Field{{Name: "WidgetBase"}}, Fields: []types.Field{{Name: "SyncNode", Doc: "SyncNode, if non-nil, is the [tree.Node] that this widget is\nviewing in the tree (the source). It should be set using\n[Tree.SyncTree]."}, {Name: "Text", Doc: "Text is the text to display for the tree item label, which automatically\ndefaults to the [tree.Node.Name] of the tree node. It has no effect\nif [Tree.SyncNode] is non-nil."}, {Name: "Icon", Doc: "Icon is an optional icon displayed to the the left of the text label."}, {Name: "IconOpen", Doc: "IconOpen is the icon to use for an open (expanded) branch;\nit defaults to [icons.KeyboardArrowDown]."}, {Name: "IconClosed", Doc: "IconClosed is the icon to use for a closed (collapsed) branch;\nit defaults to [icons.KeyboardArrowRight]."}, {Name: "IconLeaf", Doc: "IconLeaf is the icon to use for a terminal node branch that has no children;\nit defaults to [icons.Blank]."}, {Name: "TreeInit", Doc: "TreeInit is a function that can be set on the root node that is called\nwith each child tree node when it is initialized. It is only\ncalled with the root node itself in [Tree.SetTreeInit], so you\nshould typically call that instead of setting this directly."}, {Name: "Indent", Doc: "Indent is the amount to indent children relative to this node.\nIt should be set in a Styler like all other style properties."}, {Name: "OpenDepth", Doc: "OpenDepth is the depth for nodes be initialized as open (default 4).\nNodes beyond this depth will be initialized as closed."}, {Name: "Closed", Doc: "Closed is whether this tree node is currently toggled closed\n(children not visible)."}, {Name: "SelectMode", Doc: "SelectMode, when set on the root node, determines whether keyboard movements should update selection."}, {Name: "viewIndex", Doc: "linear index of this node within the entire tree.\nupdated on full rebuilds and may sometimes be off,\nbut close enough for expected uses"}, {Name: "widgetSize", Doc: "size of just this node widget.\nour alloc includes all of our children, but we only draw us."}, {Name: "root", Doc: "root is the cached root of the tree. It is automatically set."}, {Name: "SelectedNodes", Doc: "SelectedNodes holds the currently selected nodes.\nIt is only set on the root node. 
See [Tree.GetSelectedNodes]\nfor a version that also works on non-root nodes."}, {Name: "actStateLayer", Doc: "actStateLayer is the actual state layer of the tree, which\nshould be used when rendering it and its parts (but not its children).\nthe reason that it exists is so that the children of the tree\n(other trees) do not inherit its stateful background color, as\nthat does not look good."}, {Name: "inOpen", Doc: "inOpen is set in the Open method to prevent recursive opening for lazy-open nodes."}, {Name: "Branch", Doc: "Branch is the branch widget that is used to open and close the tree node."}}}) +var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/core.Tree", IDName: "tree", Doc: "Tree provides a graphical representation of a tree structure,\nproviding full navigation and manipulation abilities.\n\nIt does not handle layout by itself, so if you want it to scroll\nseparately from the rest of the surrounding context, you must\nplace it in a [Frame].\n\nIf the [Tree.SyncNode] field is non-nil, typically via the\n[Tree.SyncTree] method, then the Tree mirrors another\ntree structure, and tree editing functions apply to\nthe source tree first, and then to the Tree by sync.\n\nOtherwise, data can be directly encoded in a Tree\nderived type, to represent any kind of tree structure\nand associated data.\n\nStandard [events.Event]s are sent to any listeners, including\n[events.Select], [events.Change], and [events.DoubleClick].\nThe selected nodes are in the root [Tree.SelectedNodes] list.", Methods: []types.Method{{Name: "OpenAll", Doc: "OpenAll opens the node and all of its sub-nodes.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "CloseAll", Doc: "CloseAll closes the node and all of its sub-nodes.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "Copy", Doc: "Copy copies the tree to the clipboard.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "Cut", Doc: "Cut copies to [system.Clipboard] and deletes selected items.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "Paste", Doc: "Paste pastes clipboard at given node.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "InsertAfter", Doc: "InsertAfter inserts a new node in the tree\nafter this node, at the same (sibling) level,\nprompting for the type of node to insert.\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "InsertBefore", Doc: "InsertBefore inserts a new node in the tree\nbefore this node, at the same (sibling) level,\nprompting for the type of node to insert\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "AddChildNode", Doc: "AddChildNode adds a new child node to this one in the tree,\nprompting the user for the type of node to add\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "DeleteNode", Doc: "DeleteNode deletes the tree node or sync node corresponding\nto this view node in the sync tree.\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "Duplicate", Doc: "Duplicate duplicates the sync node corresponding to this view node in\nthe tree, and inserts the duplicate after this node (as a new sibling).\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: 
"add"}}}, {Name: "editNode", Doc: "editNode pulls up a [Form] dialog for the node.\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "inspectNode", Doc: "inspectNode pulls up a new Inspector window on the node.\nIf SyncNode is set, operates on Sync Tree.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}}, Embeds: []types.Field{{Name: "WidgetBase"}}, Fields: []types.Field{{Name: "SyncNode", Doc: "SyncNode, if non-nil, is the [tree.Node] that this widget is\nviewing in the tree (the source). It should be set using\n[Tree.SyncTree]."}, {Name: "Text", Doc: "Text is the text to display for the tree item label, which automatically\ndefaults to the [tree.Node.Name] of the tree node. It has no effect\nif [Tree.SyncNode] is non-nil."}, {Name: "Icon", Doc: "Icon is an optional icon displayed to the the left of the text label."}, {Name: "IconOpen", Doc: "IconOpen is the icon to use for an open (expanded) branch;\nit defaults to [icons.KeyboardArrowDown]."}, {Name: "IconClosed", Doc: "IconClosed is the icon to use for a closed (collapsed) branch;\nit defaults to [icons.KeyboardArrowRight]."}, {Name: "IconLeaf", Doc: "IconLeaf is the icon to use for a terminal node branch that has no children;\nit defaults to [icons.Blank]."}, {Name: "TreeInit", Doc: "TreeInit is a function that can be set on the root node that is called\nwith each child tree node when it is initialized. It is only\ncalled with the root node itself in [Tree.SetTreeInit], so you\nshould typically call that instead of setting this directly."}, {Name: "Indent", Doc: "Indent is the amount to indent children relative to this node.\nIt should be set in a Styler like all other style properties."}, {Name: "OpenDepth", Doc: "OpenDepth is the depth for nodes be initialized as open (default 4).\nNodes beyond this depth will be initialized as closed."}, {Name: "Closed", Doc: "Closed is whether this tree node is currently toggled closed\n(children not visible)."}, {Name: "SelectMode", Doc: "SelectMode, when set on the root node, determines whether keyboard movements should update selection."}, {Name: "viewIndex", Doc: "linear index of this node within the entire tree.\nupdated on full rebuilds and may sometimes be off,\nbut close enough for expected uses"}, {Name: "widgetSize", Doc: "size of just this node widget.\nour alloc includes all of our children, but we only draw us."}, {Name: "Root", Doc: "Root is the cached root of the tree. It is automatically set."}, {Name: "SelectedNodes", Doc: "SelectedNodes holds the currently selected nodes.\nIt is only set on the root node. 
See [Tree.GetSelectedNodes]\nfor a version that also works on non-root nodes."}, {Name: "actStateLayer", Doc: "actStateLayer is the actual state layer of the tree, which\nshould be used when rendering it and its parts (but not its children).\nthe reason that it exists is so that the children of the tree\n(other trees) do not inherit its stateful background color, as\nthat does not look good."}, {Name: "inOpen", Doc: "inOpen is set in the Open method to prevent recursive opening for lazy-open nodes."}, {Name: "Branch", Doc: "Branch is the branch widget that is used to open and close the tree node."}}}) // NewTree returns a new [Tree] with the given optional parent: // Tree provides a graphical representation of a tree structure, diff --git a/docs/content/2-widgets/other/plots.md b/docs/content/2-widgets/other/plots.md deleted file mode 100644 index 04943fe590..0000000000 --- a/docs/content/2-widgets/other/plots.md +++ /dev/null @@ -1,17 +0,0 @@ -Cogent Core provides interactive and customizable data plots. - -You can make an interactive plot from slice data: - -```Go -type Data struct { - Time float32 - Users float32 - Profit float32 -} -plotcore.NewPlotEditor(b).SetSlice([]Data{ - {0, 500, 1520}, - {1, 800, 860}, - {2, 1600, 930}, - {3, 1400, 682}, -}) -``` diff --git a/docs/docs.go b/docs/docs.go index 6bbbe281f3..603179cf8e 100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -27,7 +27,7 @@ import ( "cogentcore.org/core/texteditor" "cogentcore.org/core/tree" "cogentcore.org/core/yaegicore" - "cogentcore.org/core/yaegicore/symbols" + "cogentcore.org/core/yaegicore/coresymbols" ) //go:embed content @@ -100,10 +100,10 @@ func main() { }) }) - symbols.Symbols["."]["content"] = reflect.ValueOf(content) - symbols.Symbols["."]["myImage"] = reflect.ValueOf(myImage) - symbols.Symbols["."]["mySVG"] = reflect.ValueOf(mySVG) - symbols.Symbols["."]["myFile"] = reflect.ValueOf(myFile) + coresymbols.Symbols["."]["content"] = reflect.ValueOf(content) + coresymbols.Symbols["."]["myImage"] = reflect.ValueOf(myImage) + coresymbols.Symbols["."]["mySVG"] = reflect.ValueOf(mySVG) + coresymbols.Symbols["."]["myFile"] = reflect.ValueOf(myFile) htmlcore.ElementHandlers["home-page"] = homePage htmlcore.ElementHandlers["core-playground"] = func(ctx *htmlcore.Context) bool { diff --git a/enums/enumgen/config.go b/enums/enumgen/config.go index 142dcc5c39..2afee384f2 100644 --- a/enums/enumgen/config.go +++ b/enums/enumgen/config.go @@ -54,4 +54,7 @@ type Config struct { //types:add // whether to allow enums to extend other enums; this should be on in almost all circumstances, // but can be turned off for specific enum types that extend non-enum types Extend bool `default:"true"` + + // generate gosl:start and gosl:end tags around generated N values. 
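+ // This lets the generated N constant be included in the code that gosl extracts between those tags.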
+ Gosl bool } diff --git a/enums/enumgen/enumgen.go b/enums/enumgen/generate.go similarity index 95% rename from enums/enumgen/enumgen.go rename to enums/enumgen/generate.go index 69d71fcc0e..8eb4bafe38 100644 --- a/enums/enumgen/enumgen.go +++ b/enums/enumgen/generate.go @@ -12,6 +12,7 @@ import ( "fmt" "cogentcore.org/core/base/generate" + "cogentcore.org/core/base/logx" "golang.org/x/tools/go/packages" ) @@ -43,9 +44,12 @@ func ParsePackages(cfg *Config) ([]*packages.Package, error) { func Generate(cfg *Config) error { //types:add pkgs, err := ParsePackages(cfg) if err != nil { + logx.PrintlnInfo(err) return err } - return GeneratePkgs(cfg, pkgs) + err = GeneratePkgs(cfg, pkgs) + logx.PrintlnInfo(err) + return err } // GeneratePkgs generates enum methods using diff --git a/enums/enumgen/enumgen_test.go b/enums/enumgen/generate_test.go similarity index 100% rename from enums/enumgen/enumgen_test.go rename to enums/enumgen/generate_test.go diff --git a/enums/enumgen/generator.go b/enums/enumgen/generator.go index 08babc821c..26405ba79a 100644 --- a/enums/enumgen/generator.go +++ b/enums/enumgen/generator.go @@ -70,7 +70,7 @@ func (g *Generator) PrintHeader() { // or enums:bitflag. It stores the resulting types in [Generator.Types]. func (g *Generator) FindEnumTypes() error { g.Types = []*Type{} - return generate.Inspect(g.Pkg, g.InspectForType) + return generate.Inspect(g.Pkg, g.InspectForType, "enumgen.go", "typegen.go") } // AllowedEnumTypes are the types that can be used for enums @@ -159,7 +159,7 @@ func (g *Generator) Generate() (bool, error) { for _, typ := range g.Types { values := make([]Value, 0, 100) for _, file := range g.Pkg.Syntax { - if ast.IsGenerated(file) { + if generate.ExcludeFile(g.Pkg, file, "enumgen.go", "typegen.go") { continue } var terr error diff --git a/enums/enumgen/methods.go b/enums/enumgen/methods.go index b7cc783130..9c572b0737 100644 --- a/enums/enumgen/methods.go +++ b/enums/enumgen/methods.go @@ -49,6 +49,13 @@ var NConstantTmpl = template.Must(template.New("StringNConstant").Parse( const {{.Name}}N {{.Name}} = {{.MaxValueP1}} `)) +var NConstantTmplGosl = template.Must(template.New("StringNConstant").Parse( + `//gosl:start +//{{.Name}}N is the highest valid value for type {{.Name}}, plus one. +const {{.Name}}N {{.Name}} = {{.MaxValueP1}} +//gosl:end +`)) + var SetStringMethodTmpl = template.Must(template.New("SetStringMethod").Parse( `// SetString sets the {{.Name}} value from its string representation, // and returns an error if the string is invalid. @@ -111,7 +118,11 @@ func (g *Generator) BuildBasicMethods(values []Value, typ *Type) { typ.MaxValueP1 = max + 1 - g.ExecTmpl(NConstantTmpl, typ) + if g.Config.Gosl { + g.ExecTmpl(NConstantTmplGosl, typ) + } else { + g.ExecTmpl(NConstantTmpl, typ) + } // Print the map between name and value g.PrintValueMap(values, typ) diff --git a/enums/enumgen/testdata/enumgen.go b/enums/enumgen/testdata/enumgen.go index e02efad438..df0be10917 100644 --- a/enums/enumgen/testdata/enumgen.go +++ b/enums/enumgen/testdata/enumgen.go @@ -1,4 +1,4 @@ -// Code generated by "enumgen.test -test.testlogfile=/var/folders/x1/r8shprmj7j71zbw3qvgl9dqc0000gq/T/go-build1829688390/b649/testlog.txt -test.paniconexit0 -test.timeout=20s"; DO NOT EDIT. +// Code generated by "enumgen.test -test.paniconexit0 -test.timeout=10m0s"; DO NOT EDIT. 
package testdata diff --git a/enums/enumgen/testdata/enumgen.golden b/enums/enumgen/testdata/enumgen.golden index df0be10917..4826cd43fd 100644 --- a/enums/enumgen/testdata/enumgen.golden +++ b/enums/enumgen/testdata/enumgen.golden @@ -1,4 +1,4 @@ -// Code generated by "enumgen.test -test.paniconexit0 -test.timeout=10m0s"; DO NOT EDIT. +// Code generated by "enumgen.test -test.paniconexit0 -test.timeout=10m0s -test.v=true"; DO NOT EDIT. package testdata diff --git a/enums/enumgen/typegen.go b/enums/enumgen/typegen.go index eae66358d7..b60216af42 100644 --- a/enums/enumgen/typegen.go +++ b/enums/enumgen/typegen.go @@ -6,6 +6,6 @@ import ( "cogentcore.org/core/types" ) -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/enums/enumgen.Config", IDName: "config", Doc: "Config contains the configuration information\nused by enumgen", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "Dir", Doc: "the source directory to run enumgen on (can be set to multiple through paths like ./...)"}, {Name: "Output", Doc: "the output file location relative to the package on which enumgen is being called"}, {Name: "Transform", Doc: "if specified, the enum item transformation method (upper, lower, snake, SNAKE, kebab, KEBAB,\ncamel, lower-camel, title, sentence, first, first-upper, or first-lower)"}, {Name: "TrimPrefix", Doc: "if specified, a comma-separated list of prefixes to trim from each item"}, {Name: "AddPrefix", Doc: "if specified, the prefix to add to each item"}, {Name: "LineComment", Doc: "whether to use line comment text as printed text when present"}, {Name: "AcceptLower", Doc: "whether to accept lowercase versions of enum names in SetString"}, {Name: "IsValid", Doc: "whether to generate a method returning whether a value is\na valid option for its enum type; this must also be set for\nany base enum type being extended"}, {Name: "Text", Doc: "whether to generate text marshaling methods"}, {Name: "SQL", Doc: "whether to generate methods that implement the SQL Scanner and Valuer interfaces"}, {Name: "GQL", Doc: "whether to generate GraphQL marshaling methods for gqlgen"}, {Name: "Extend", Doc: "whether to allow enums to extend other enums; this should be on in almost all circumstances,\nbut can be turned off for specific enum types that extend non-enum types"}}}) +var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/enums/enumgen.Config", IDName: "config", Doc: "Config contains the configuration information\nused by enumgen", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "Dir", Doc: "the source directory to run enumgen on (can be set to multiple through paths like ./...)"}, {Name: "Output", Doc: "the output file location relative to the package on which enumgen is being called"}, {Name: "Transform", Doc: "if specified, the enum item transformation method (upper, lower, snake, SNAKE, kebab, KEBAB,\ncamel, lower-camel, title, sentence, first, first-upper, or first-lower)"}, {Name: "TrimPrefix", Doc: "if specified, a comma-separated list of prefixes to trim from each item"}, {Name: "AddPrefix", Doc: "if specified, the prefix to add to each item"}, {Name: "LineComment", Doc: "whether to use line comment text as printed text when present"}, {Name: "AcceptLower", Doc: "whether to accept lowercase versions of enum names in SetString"}, {Name: "IsValid", Doc: "whether to generate a method returning whether a value is\na valid option for its enum type; this must also be set for\nany base enum type 
being extended"}, {Name: "Text", Doc: "whether to generate text marshaling methods"}, {Name: "SQL", Doc: "whether to generate methods that implement the SQL Scanner and Valuer interfaces"}, {Name: "GQL", Doc: "whether to generate GraphQL marshaling methods for gqlgen"}, {Name: "Extend", Doc: "whether to allow enums to extend other enums; this should be on in almost all circumstances,\nbut can be turned off for specific enum types that extend non-enum types"}, {Name: "Gosl", Doc: "generate gosl:start and gosl:end tags around generated N values."}}}) var _ = types.AddFunc(&types.Func{Name: "cogentcore.org/core/enums/enumgen.Generate", Doc: "Generate generates enum methods, using the\nconfiguration information, loading the packages from the\nconfiguration source directory, and writing the result\nto the configuration output file.\n\nIt is a simple entry point to enumgen that does all\nof the steps; for more specific functionality, create\na new [Generator] with [NewGenerator] and call methods on it.", Directives: []types.Directive{{Tool: "cli", Directive: "cmd", Args: []string{"-root"}}, {Tool: "types", Directive: "add"}}, Args: []string{"cfg"}, Returns: []string{"error"}}) diff --git a/examples/plot/plot.go b/examples/plot/plot.go deleted file mode 100644 index 226f565e72..0000000000 --- a/examples/plot/plot.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "embed" - - "cogentcore.org/core/core" - "cogentcore.org/core/plot/plotcore" - "cogentcore.org/core/tensor/table" -) - -//go:embed *.tsv -var tsv embed.FS - -func main() { - b := core.NewBody("Plot Example") - - epc := table.NewTable("epc") - epc.OpenFS(tsv, "ra25epoch.tsv", table.Tab) - - pl := plotcore.NewPlotEditor(b) - pl.Options.Title = "RA25 Epoch Train" - pl.Options.XAxis = "Epoch" - pl.Options.Points = true - pl.SetTable(epc) - pl.ColumnOptions("UnitErr").On = true - b.AddTopBar(func(bar *core.Frame) { - core.NewToolbar(bar).Maker(pl.MakeToolbar) - }) - - b.RunMainWindow() -} diff --git a/examples/plot/ra25epoch.tsv b/examples/plot/ra25epoch.tsv deleted file mode 100644 index c7ec83530b..0000000000 --- a/examples/plot/ra25epoch.tsv +++ /dev/null @@ -1,48 +0,0 @@ -|Run |Epoch $RunName #CorSim #UnitErr #PctErr #PctCor #TstCorSim #TstUnitErr #TstPctCor #TstPctErr #PerTrlMSec #Hidden1_ActMAvg #Hidden1_ActMMax #Hidden1_MaxGeM #Hidden1_CorDiff #Hidden1_GiMult #Hidden2_ActMAvg #Hidden2_ActMMax #Hidden2_MaxGeM #Hidden2_CorDiff #Hidden2_GiMult #Output_ActMAvg #Output_ActMMax #Output_MaxGeM #Output_CorDiff #Output_GiMult #Input_ActAvg #Hidden1_PCA_NStrong #Hidden1_PCA_Top5 #Hidden1_PCA_Next5 #Hidden1_PCA_Rest #Hidden2_PCA_NStrong #Hidden2_PCA_Top5 #Hidden2_PCA_Next5 #Hidden2_PCA_Rest #Output_PCA_NStrong #Output_PCA_Top5 #Output_PCA_Next5 #Output_PCA_Rest -0 0 Base 0.002623 0.2525 1 0 0 0 0 0 0 0.07241 0.6488 1.021 0.01213 1 0.07222 0.4663 0.9947 0.08514 1 0.2407 0.6594 0.9931 0.9974 1 0.1537 24 0.1876 0.09636 0.005853 24 0.1082 0.05629 0.004091 10 0.05466 0.01813 0.003332 -0 1 Base 0.305 0.2375 1 0 0 0 0 0 12.42 0.07315 0.646 1.043 0.009661 1 0.07154 0.4906 0.9892 0.06949 1 0.2354 0.6632 0.9878 0.695 1 0.1603 24 0.1876 0.09636 0.005853 24 0.1082 0.05629 0.004091 10 0.05466 0.01813 0.003332 -0 2 Base 0.4382 0.2288 1 0 0 0 0 0 11.02 0.07365 0.6484 1.061 0.01141 1 0.07133 0.5045 0.9881 0.06971 1 0.2351 0.5644 0.9815 0.5618 1 0.1664 24 0.1876 0.09636 0.005853 24 0.1082 
0.05629 0.004091 10 0.05466 0.01813 0.003332 -0 3 Base 0.473 0.2275 1 0 0 0 0 0 11.11 0.07348 0.6489 1.078 0.01176 1 0.07126 0.493 0.9866 0.08268 1 0.2335 0.5932 0.9778 0.527 1 0.1718 24 0.1876 0.09636 0.005853 24 0.1082 0.05629 0.004091 10 0.05466 0.01813 0.003332 -0 4 Base 0.5577 0.22 1 0 0.5551 0.2088 0 1 13.14 0.0731 0.6558 1.11 0.01174 1 0.07236 0.4924 0.9821 0.07721 1 0.2391 0.7094 0.9724 0.4423 1 0.1811 24 0.1876 0.09636 0.005853 24 0.1082 0.05629 0.004091 10 0.05466 0.01813 0.003332 -0 5 Base 0.6384 0.1813 1 0 0.5551 0.2088 0 1 11.19 0.07303 0.6465 1.122 0.01148 1 0.07182 0.4917 0.9855 0.0753 1 0.2437 0.7634 0.9705 0.3616 1 0.1851 21 0.2123 0.1116 0.005676 21 0.1164 0.0685 0.003969 12 0.0756 0.02649 0.004503 -0 6 Base 0.6672 0.1763 0.9375 0.0625 0.5551 0.2088 0 1 11.2 0.07343 0.6434 1.134 0.009667 1 0.07291 0.4896 0.9809 0.06602 1 0.2426 0.7192 0.9678 0.3328 1 0.1887 21 0.2123 0.1116 0.005676 21 0.1164 0.0685 0.003969 12 0.0756 0.02649 0.004503 -0 7 Base 0.6644 0.1788 1 0 0.5551 0.2088 0 1 11.14 0.07337 0.6417 1.145 0.01117 1 0.07198 0.4965 0.9799 0.07333 1 0.2482 0.7953 0.9706 0.3356 1 0.192 21 0.2123 0.1116 0.005676 21 0.1164 0.0685 0.003969 12 0.0756 0.02649 0.004503 -0 8 Base 0.7187 0.1388 0.875 0.125 0.5551 0.2088 0 1 11.1 0.07382 0.6503 1.156 0.01262 1 0.07264 0.4992 0.9787 0.08156 1 0.2615 0.8592 0.9712 0.2813 1 0.1949 21 0.2123 0.1116 0.005676 21 0.1164 0.0685 0.003969 12 0.0756 0.02649 0.004503 -0 9 Base 0.7732 0.1088 0.8125 0.1875 0.7553 0.135 0 1 13.01 0.07344 0.6376 1.175 0.008834 1 0.07177 0.5021 0.9802 0.05816 1 0.2536 0.8328 0.9813 0.2268 1 0.1999 21 0.2123 0.1116 0.005676 21 0.1164 0.0685 0.003969 12 0.0756 0.02649 0.004503 -0 10 Base 0.7861 0.1113 0.8125 0.1875 0.7553 0.135 0 1 11.3 0.07344 0.6511 1.183 0.009321 1 0.07226 0.5157 0.9819 0.05857 1 0.2561 0.8696 0.9849 0.2139 1 0.2021 24 0.2092 0.1191 0.006909 24 0.1245 0.06798 0.004171 15 0.1318 0.04342 0.006997 -0 11 Base 0.7785 0.1038 0.75 0.25 0.7553 0.135 0 1 11.02 0.07336 0.6428 1.19 0.01263 1 0.07282 0.4994 0.9843 0.0738 1 0.2584 0.8563 0.9899 0.2215 1 0.204 24 0.2092 0.1191 0.006909 24 0.1245 0.06798 0.004171 15 0.1318 0.04342 0.006997 -0 12 Base 0.8289 0.07625 0.7188 0.2812 0.7553 0.135 0 1 11.02 0.07397 0.6507 1.199 0.01002 1 0.0725 0.5207 0.9899 0.05569 1 0.261 0.9157 0.9903 0.1711 1 0.2057 24 0.2092 0.1191 0.006909 24 0.1245 0.06798 0.004171 15 0.1318 0.04342 0.006997 -0 13 Base 0.8539 0.06375 0.6875 0.3125 0.7553 0.135 0 1 11.07 0.0736 0.6463 1.204 0.009677 1 0.0729 0.5059 0.9907 0.05554 1 0.2537 0.9003 0.9919 0.1461 1 0.2073 24 0.2092 0.1191 0.006909 24 0.1245 0.06798 0.004171 15 0.1318 0.04342 0.006997 -0 14 Base 0.8322 0.0725 0.6875 0.3125 0.8393 0.0725 0.375 0.625 12.77 0.07361 0.6534 1.216 0.01075 1 0.07271 0.5212 0.9916 0.06336 1 0.2677 0.9293 0.999 0.1678 1 0.21 24 0.2092 0.1191 0.006909 24 0.1245 0.06798 0.004171 15 0.1318 0.04342 0.006997 -0 15 Base 0.8727 0.05125 0.5312 0.4688 0.8393 0.0725 0.375 0.625 10.97 0.07341 0.653 1.221 0.009811 1 0.07342 0.5275 0.9929 0.05527 1 0.2599 0.9216 1.003 0.1273 1 0.2112 21 0.2286 0.1285 0.005968 21 0.1289 0.08213 0.003995 16 0.1836 0.06461 0.01035 -0 16 Base 0.881 0.04125 0.5 0.5 0.8393 0.0725 0.375 0.625 10.81 0.07384 0.6527 1.227 0.009199 1 0.0731 0.527 0.9939 0.04453 1 0.2645 0.9465 1.008 0.119 1 0.2122 21 0.2286 0.1285 0.005968 21 0.1289 0.08213 0.003995 16 0.1836 0.06461 0.01035 -0 17 Base 0.8809 0.0425 0.5938 0.4062 0.8393 0.0725 0.375 0.625 11.12 0.07405 0.6622 1.233 0.008919 1 0.07259 0.5308 0.9988 0.04804 1 0.2633 0.9405 1.01 0.1191 1 0.2132 21 0.2286 
0.1285 0.005968 21 0.1289 0.08213 0.003995 16 0.1836 0.06461 0.01035 -0 18 Base 0.8872 0.03625 0.4375 0.5625 0.8393 0.0725 0.375 0.625 10.77 0.07405 0.6662 1.238 0.009321 1 0.07355 0.5362 1.001 0.0502 1 0.2612 0.9393 1.012 0.1128 1 0.214 21 0.2286 0.1285 0.005968 21 0.1289 0.08213 0.003995 16 0.1836 0.06461 0.01035 -0 19 Base 0.8825 0.04 0.4375 0.5625 0.9115 0.0225 0.6562 0.3438 12.81 0.07401 0.6626 1.252 0.008725 1 0.07376 0.5265 1.007 0.04535 1 0.2641 0.9256 1.02 0.1175 1 0.2154 21 0.2286 0.1285 0.005968 21 0.1289 0.08213 0.003995 16 0.1836 0.06461 0.01035 -0 20 Base 0.9157 0.02625 0.3125 0.6875 0.9115 0.0225 0.6562 0.3438 10.96 0.07414 0.6556 1.257 0.008213 1 0.07337 0.5475 1.011 0.0385 1 0.2643 0.9627 1.028 0.08429 1 0.216 21 0.2531 0.133 0.006013 21 0.1505 0.07946 0.003921 16 0.231 0.07662 0.0119 -0 21 Base 0.9183 0.0175 0.2812 0.7188 0.9115 0.0225 0.6562 0.3438 10.89 0.07442 0.6688 1.263 0.00851 1 0.07321 0.5387 1.013 0.03879 1 0.2606 0.9467 1.031 0.08168 1 0.2166 21 0.2531 0.133 0.006013 21 0.1505 0.07946 0.003921 16 0.231 0.07662 0.0119 -0 22 Base 0.928 0.01125 0.1875 0.8125 0.9115 0.0225 0.6562 0.3438 10.73 0.07388 0.6622 1.269 0.007702 1 0.07309 0.5295 1.015 0.03272 1 0.2651 0.9686 1.033 0.07197 1 0.2171 21 0.2531 0.133 0.006013 21 0.1505 0.07946 0.003921 16 0.231 0.07662 0.0119 -0 23 Base 0.932 0.0125 0.1875 0.8125 0.9115 0.0225 0.6562 0.3438 10.71 0.07482 0.6648 1.276 0.007855 1 0.07331 0.5547 1.021 0.03814 1 0.2643 0.9709 1.04 0.06799 1 0.2176 21 0.2531 0.133 0.006013 21 0.1505 0.07946 0.003921 16 0.231 0.07662 0.0119 -0 24 Base 0.909 0.0225 0.2188 0.7812 0.9203 0.01875 0.8125 0.1875 12.87 0.07458 0.6704 1.284 0.008471 1 0.0731 0.5418 1.027 0.03311 1 0.2679 0.9753 1.051 0.09104 1 0.2184 21 0.2531 0.133 0.006013 21 0.1505 0.07946 0.003921 16 0.231 0.07662 0.0119 -0 25 Base 0.9181 0.02 0.2188 0.7812 0.9203 0.01875 0.8125 0.1875 10.73 0.075 0.6827 1.289 0.009043 1 0.07381 0.5343 1.029 0.04152 1 0.2652 0.9715 1.054 0.08188 1 0.2187 24 0.2576 0.1187 0.007354 23 0.1501 0.07645 0.004662 17 0.2128 0.09799 0.01463 -0 26 Base 0.9325 0.01125 0.125 0.875 0.9203 0.01875 0.8125 0.1875 10.57 0.0744 0.6656 1.297 0.007468 1 0.07385 0.5538 1.031 0.02995 1 0.2587 0.9746 1.06 0.0675 1 0.219 24 0.2576 0.1187 0.007354 23 0.1501 0.07645 0.004662 17 0.2128 0.09799 0.01463 -0 27 Base 0.9352 0.005 0.125 0.875 0.9203 0.01875 0.8125 0.1875 10.79 0.07504 0.6666 1.303 0.007872 1 0.07343 0.5468 1.033 0.03298 1 0.2617 0.9805 1.067 0.06477 1 0.2194 24 0.2576 0.1187 0.007354 23 0.1501 0.07645 0.004662 17 0.2128 0.09799 0.01463 -0 28 Base 0.9344 0.00875 0.09375 0.9062 0.9203 0.01875 0.8125 0.1875 10.7 0.07497 0.6728 1.309 0.006918 1 0.07319 0.5465 1.037 0.02824 1 0.2624 0.9775 1.07 0.06555 1 0.2196 24 0.2576 0.1187 0.007354 23 0.1501 0.07645 0.004662 17 0.2128 0.09799 0.01463 -0 29 Base 0.9444 0.005 0.125 0.875 0.9404 0.00875 0.875 0.125 12.64 0.07495 0.6559 1.321 0.008182 1 0.07303 0.5548 1.049 0.03021 1 0.259 0.9734 1.081 0.05559 1 0.22 24 0.2576 0.1187 0.007354 23 0.1501 0.07645 0.004662 17 0.2128 0.09799 0.01463 -0 30 Base 0.9536 0.00625 0.125 0.875 0.9404 0.00875 0.875 0.125 10.65 0.07464 0.6754 1.326 0.006765 1 0.07384 0.5558 1.048 0.02781 1 0.2593 0.9811 1.085 0.04638 1 0.2202 20 0.2767 0.1455 0.006508 19 0.1686 0.09263 0.004174 16 0.2711 0.1159 0.01494 -0 31 Base 0.9525 0.00375 0.0625 0.9375 0.9404 0.00875 0.875 0.125 10.8 0.07543 0.679 1.334 0.008205 1 0.07345 0.5503 1.056 0.027 1 0.2554 0.9787 1.09 0.0475 1 0.2203 20 0.2767 0.1455 0.006508 19 0.1686 0.09263 0.004174 16 0.2711 0.1159 0.01494 -0 32 Base 
0.9512 0.005 0.09375 0.9062 0.9404 0.00875 0.875 0.125 10.56 0.07593 0.6808 1.34 0.008235 1 0.07356 0.5628 1.059 0.03047 1 0.257 0.9755 1.093 0.04885 1 0.2205 20 0.2767 0.1455 0.006508 19 0.1686 0.09263 0.004174 16 0.2711 0.1159 0.01494 -0 33 Base 0.9525 0.005 0.09375 0.9062 0.9404 0.00875 0.875 0.125 10.65 0.07651 0.6866 1.344 0.007291 1 0.07295 0.5617 1.067 0.02448 1 0.2608 0.9898 1.094 0.04746 1 0.2207 20 0.2767 0.1455 0.006508 19 0.1686 0.09263 0.004174 16 0.2711 0.1159 0.01494 -0 34 Base 0.9511 0.005 0.09375 0.9062 0.9535 0.00125 0.9688 0.03125 12.6 0.07584 0.6793 1.353 0.006549 1 0.07333 0.57 1.072 0.02668 1 0.256 0.9793 1.103 0.04893 1 0.2209 20 0.2767 0.1455 0.006508 19 0.1686 0.09263 0.004174 16 0.2711 0.1159 0.01494 -0 35 Base 0.95 0.00875 0.1562 0.8438 0.9535 0.00125 0.9688 0.03125 10.72 0.07564 0.6955 1.358 0.007209 1 0.07355 0.5729 1.076 0.02475 1 0.2607 0.9845 1.103 0.04996 1 0.221 24 0.2643 0.1444 0.007958 23 0.1695 0.09511 0.005056 18 0.2649 0.1195 0.01812 -0 36 Base 0.9478 0.00625 0.125 0.875 0.9535 0.00125 0.9688 0.03125 10.44 0.0758 0.6768 1.362 0.007061 1 0.07356 0.562 1.079 0.02375 1 0.258 0.9799 1.107 0.05223 1 0.2211 24 0.2643 0.1444 0.007958 23 0.1695 0.09511 0.005056 18 0.2649 0.1195 0.01812 -0 37 Base 0.9577 0.00375 0.0625 0.9375 0.9535 0.00125 0.9688 0.03125 10.5 0.07661 0.701 1.366 0.007002 1 0.07417 0.5805 1.088 0.02054 1 0.2537 0.9802 1.11 0.04235 1 0.2212 24 0.2643 0.1444 0.007958 23 0.1695 0.09511 0.005056 18 0.2649 0.1195 0.01812 -0 38 Base 0.9608 0.00125 0.03125 0.9688 0.9535 0.00125 0.9688 0.03125 10.6 0.07604 0.7044 1.371 0.006446 1 0.07395 0.5717 1.09 0.02204 1 0.2535 0.9792 1.111 0.03917 1 0.2213 24 0.2643 0.1444 0.007958 23 0.1695 0.09511 0.005056 18 0.2649 0.1195 0.01812 -0 39 Base 0.96 0.00375 0.09375 0.9062 0.959 0.0025 0.9375 0.0625 12.57 0.07629 0.6914 1.379 0.007166 1 0.07323 0.5777 1.097 0.02131 1 0.2561 0.9878 1.115 0.04005 1 0.2214 24 0.2643 0.1444 0.007958 23 0.1695 0.09511 0.005056 18 0.2649 0.1195 0.01812 -0 40 Base 0.9624 0.00125 0.03125 0.9688 0.959 0.0025 0.9375 0.0625 10.52 0.07664 0.7003 1.384 0.005926 1 0.07419 0.5797 1.099 0.01999 1 0.258 0.9917 1.118 0.03758 1 0.2214 20 0.3151 0.1524 0.006273 20 0.1988 0.09904 0.00402 17 0.2949 0.1143 0.01642 -0 41 Base 0.9605 0.0025 0.0625 0.9375 0.959 0.0025 0.9375 0.0625 10.53 0.07656 0.6925 1.389 0.006171 1 0.07358 0.5915 1.106 0.01794 1 0.252 0.9934 1.119 0.03954 1 0.2215 20 0.3151 0.1524 0.006273 20 0.1988 0.09904 0.00402 17 0.2949 0.1143 0.01642 -0 42 Base 0.9577 0.0025 0.0625 0.9375 0.959 0.0025 0.9375 0.0625 10.45 0.07676 0.7049 1.391 0.006068 1 0.07406 0.5734 1.112 0.02369 1 0.256 0.9839 1.124 0.04233 1 0.2215 20 0.3151 0.1524 0.006273 20 0.1988 0.09904 0.00402 17 0.2949 0.1143 0.01642 -0 43 Base 0.9703 0 0 1 0.959 0.0025 0.9375 0.0625 10.27 0.07618 0.7057 1.395 0.005172 1 0.07343 0.5834 1.114 0.01613 1 0.254 0.9963 1.127 0.02969 1 0.2216 20 0.3151 0.1524 0.006273 20 0.1988 0.09904 0.00402 17 0.2949 0.1143 0.01642 -0 44 Base 0.97 0.00125 0.03125 0.9688 0.9676 0.0025 0.9375 0.0625 12.32 0.07682 0.7062 1.404 0.005644 1 0.07353 0.5933 1.124 0.01662 1 0.2515 0.993 1.131 0.03004 1 0.2216 20 0.3151 0.1524 0.006273 20 0.1988 0.09904 0.00402 17 0.2949 0.1143 0.01642 -0 45 Base 0.9661 0 0 1 0.9676 0.0025 0.9375 0.0625 10.67 0.07726 0.7074 1.409 0.005708 1 0.07429 0.5932 1.126 0.01833 1 0.255 0.9913 1.131 0.03392 1 0.2217 23 0.308 0.1482 0.008264 23 0.2055 0.09122 0.00535 18 0.3067 0.1335 0.02283 -0 46 Base 0.9682 0 0 1 0.9676 0.0025 0.9375 0.0625 10.34 0.07727 0.705 1.412 0.005763 1 0.07392 0.591 
1.128 0.01715 1 0.2516 0.9877 1.132 0.03176 1 0.2217 23 0.308 0.1482 0.008264 23 0.2055 0.09122 0.00535 18 0.3067 0.1335 0.02283 diff --git a/filetree/copypaste.go b/filetree/copypaste.go index f75dd7ae6d..9c7244a74c 100644 --- a/filetree/copypaste.go +++ b/filetree/copypaste.go @@ -24,7 +24,7 @@ import ( // MimeData adds mimedata for this node: a text/plain of the Path, // text/plain of filename, and text/ func (fn *Node) MimeData(md *mimedata.Mimes) { - froot := fn.FileRoot + froot := fn.FileRoot() path := string(fn.Filepath) punq := fn.PathFrom(froot) // note: tree paths have . escaped -> \, *md = append(*md, mimedata.NewTextData(punq)) @@ -77,7 +77,7 @@ func (fn *Node) DragDrop(e events.Event) { // that is non-nil (otherwise just uses absolute path), and returns list of existing // and node for last one if exists. func (fn *Node) pasteCheckExisting(tfn *Node, md mimedata.Mimes, externalDrop bool) ([]string, *Node) { - froot := fn.FileRoot + froot := fn.FileRoot() tpath := "" if tfn != nil { tpath = string(tfn.Filepath) @@ -118,7 +118,7 @@ func (fn *Node) pasteCheckExisting(tfn *Node, md mimedata.Mimes, externalDrop bo // pasteCopyFiles copies files in given data into given target directory func (fn *Node) pasteCopyFiles(tdir *Node, md mimedata.Mimes, externalDrop bool) { - froot := fn.FileRoot + froot := fn.FileRoot() nf := len(md) if !externalDrop { nf /= 3 @@ -283,7 +283,7 @@ func (fn *Node) pasteFiles(md mimedata.Mimes, externalDrop bool, dropFinal func( // satisfies core.DragNDropper interface and can be overridden by subtypes func (fn *Node) DropDeleteSource(e events.Event) { de := e.(*events.DragDrop) - froot := fn.FileRoot + froot := fn.FileRoot() if froot == nil || fn.isExternal() { return } diff --git a/filetree/enumgen.go b/filetree/enumgen.go index 9d2068dcb7..f01aa7ef8b 100644 --- a/filetree/enumgen.go +++ b/filetree/enumgen.go @@ -13,7 +13,7 @@ const dirFlagsN dirFlags = 3 var _dirFlagsValueMap = map[string]dirFlags{`IsOpen`: 0, `SortByName`: 1, `SortByModTime`: 2} -var _dirFlagsDescMap = map[dirFlags]string{0: `dirIsOpen means directory is open -- else closed`, 1: `dirSortByName means sort the directory entries by name. this is mutex with other sorts -- keeping option open for non-binary sort choices.`, 2: `dirSortByModTime means sort the directory entries by modification time`} +var _dirFlagsDescMap = map[dirFlags]string{0: `dirIsOpen means directory is open -- else closed`, 1: `dirSortByName means sort the directory entries by name. 
this overrides SortByModTime default on Tree if set.`, 2: `dirSortByModTime means sort the directory entries by modification time.`} var _dirFlagsMap = map[dirFlags]string{0: `IsOpen`, 1: `SortByName`, 2: `SortByModTime`} diff --git a/filetree/file.go b/filetree/file.go index 813c731b36..dc6082888a 100644 --- a/filetree/file.go +++ b/filetree/file.go @@ -59,7 +59,7 @@ func (fn *Node) OpenFileDefault() error { // duplicateFiles makes a copy of selected files func (fn *Node) duplicateFiles() { //types:add - fn.FileRoot.NeedsLayout() + fn.FileRoot().NeedsLayout() fn.SelectedFunc(func(sn *Node) { sn.duplicateFile() }) @@ -92,7 +92,7 @@ func (fn *Node) deleteFiles() { //types:add // deleteFilesImpl does the actual deletion, no prompts func (fn *Node) deleteFilesImpl() { - fn.FileRoot.NeedsLayout() + fn.FileRoot().NeedsLayout() fn.SelectedFunc(func(sn *Node) { if !sn.Info.IsDir() { sn.deleteFile() @@ -100,7 +100,7 @@ func (fn *Node) deleteFilesImpl() { } var fns []string sn.Info.Filenames(&fns) - ft := sn.FileRoot + ft := sn.FileRoot() for _, filename := range fns { sn, ok := ft.FindFile(filename) if !ok { @@ -145,7 +145,7 @@ func (fn *Node) deleteFile() error { // renames any selected files func (fn *Node) RenameFiles() { //types:add - fn.FileRoot.NeedsLayout() + fn.FileRoot().NeedsLayout() fn.SelectedFunc(func(sn *Node) { fb := core.NewSoloFuncButton(sn).SetFunc(sn.RenameFile) fb.Args[0].SetValue(sn.Name) @@ -158,7 +158,7 @@ func (fn *Node) RenameFile(newpath string) error { //types:add if fn.isExternal() { return nil } - root := fn.FileRoot + root := fn.FileRoot() var err error fn.closeBuf() // invalid after this point orgpath := fn.Filepath @@ -167,8 +167,8 @@ func (fn *Node) RenameFile(newpath string) error { //types:add return err } if fn.IsDir() { - if fn.FileRoot.isDirOpen(orgpath) { - fn.FileRoot.setDirOpen(core.Filename(newpath)) + if fn.FileRoot().isDirOpen(orgpath) { + fn.FileRoot().setDirOpen(core.Filename(newpath)) } } repo, _ := fn.Repo() @@ -230,15 +230,15 @@ func (fn *Node) newFile(filename string, addToVCS bool) { //types:add return } if addToVCS { - nfn, ok := fn.FileRoot.FindFile(np) - if ok && nfn.This != fn.FileRoot.This && string(nfn.Filepath) == np { + nfn, ok := fn.FileRoot().FindFile(np) + if ok && !nfn.IsRoot() && string(nfn.Filepath) == np { // todo: this is where it is erroneously adding too many files to vcs! fmt.Println("Adding new file to VCS:", nfn.Filepath) core.MessageSnackbar(fn, "Adding new file to VCS: "+fsx.DirAndFile(string(nfn.Filepath))) nfn.AddToVCS() } } - fn.FileRoot.UpdatePath(np) + fn.FileRoot().UpdatePath(np) } // makes a new folder in the given selected directory @@ -267,7 +267,7 @@ func (fn *Node) newFolder(foldername string) { //types:add core.ErrorSnackbar(fn, err) return } - fn.FileRoot.UpdatePath(ppath) + fn.FileRoot().UpdatePath(ppath) } // copyFileToDir copies given file path into node that is a directory. 
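The recurring change above (and throughout the rest of this diff) replaces the stored `FileRoot` field with a `FileRoot()` accessor that resolves the root from the generic `Root` pointer. A minimal sketch of how a custom node type would use the accessor after this change; the `MyNode` type and `refreshDir` helper are illustrative, not part of the diff:

```go
package mynodes

import "cogentcore.org/core/filetree"

// MyNode is a hypothetical custom node type embedding filetree.Node.
type MyNode struct {
	filetree.Node
}

// refreshDir reaches tree-level state through FileRoot() rather than
// a per-node field, and guards against a nil root.
func (mn *MyNode) refreshDir() {
	ft := mn.FileRoot() // resolved via the Treer/AsTree helpers added later in this diff
	if ft == nil || ft.FS != nil {
		return // this sketch only handles os filesystem trees
	}
	ft.UpdatePath(string(mn.Filepath))
}
```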
@@ -280,11 +280,11 @@ func (fn *Node) copyFileToDir(filename string, perm os.FileMode) { sfn := filepath.Base(filename) tpath := filepath.Join(ppath, sfn) fileinfo.CopyFile(tpath, filename, perm) - fn.FileRoot.UpdatePath(ppath) - ofn, ok := fn.FileRoot.FindFile(filename) + fn.FileRoot().UpdatePath(ppath) + ofn, ok := fn.FileRoot().FindFile(filename) if ok && ofn.Info.VCS >= vcs.Stored { - nfn, ok := fn.FileRoot.FindFile(tpath) - if ok && nfn.This != fn.FileRoot.This { + nfn, ok := fn.FileRoot().FindFile(tpath) + if ok && !nfn.IsRoot() { if string(nfn.Filepath) != tpath { fmt.Printf("error: nfn.FPath != tpath; %q != %q, see bug #453\n", nfn.Filepath, tpath) } else { diff --git a/filetree/find.go b/filetree/find.go index b48b8a912f..7ecac4c304 100644 --- a/filetree/find.go +++ b/filetree/find.go @@ -62,7 +62,7 @@ func (fn *Node) FindFile(fnm string) (*Node, bool) { } } - if efn, err := fn.FileRoot.externalNodeByPath(fnm); err == nil { + if efn, err := fn.FileRoot().externalNodeByPath(fnm); err == nil { return efn, true } diff --git a/filetree/menu.go b/filetree/menu.go index 0f30d373b1..f3d5dd3a4d 100644 --- a/filetree/menu.go +++ b/filetree/menu.go @@ -26,7 +26,7 @@ func vcsLabelFunc(fn *Node, label string) string { } func (fn *Node) VCSContextMenu(m *core.Scene) { - if fn.FileRoot.FS != nil { + if fn.FileRoot().FS != nil { return } core.NewFuncButton(m).SetFunc(fn.addToVCSSelected).SetText(vcsLabelFunc(fn, "Add to VCS")).SetIcon(icons.Add). diff --git a/filetree/node.go b/filetree/node.go index 433ae767bf..b6c4ed11cb 100644 --- a/filetree/node.go +++ b/filetree/node.go @@ -21,7 +21,6 @@ import ( "cogentcore.org/core/base/fsx" "cogentcore.org/core/base/vcs" "cogentcore.org/core/colors" - "cogentcore.org/core/colors/gradient" "cogentcore.org/core/core" "cogentcore.org/core/events" "cogentcore.org/core/events/key" @@ -53,9 +52,6 @@ type Node struct { //core:embedder // Buffer is the file buffer for editing this file. Buffer *texteditor.Buffer `edit:"-" set:"-" json:"-" xml:"-" copier:"-"` - // FileRoot is the root [Tree] of the tree, which has global state. - FileRoot *Tree `edit:"-" set:"-" json:"-" xml:"-" copier:"-"` - // DirRepo is the version control system repository for this directory, // only non-nil if this is the highest-level directory in the tree under vcs control. DirRepo vcs.Repo `edit:"-" set:"-" json:"-" xml:"-" copier:"-"` @@ -70,6 +66,11 @@ func (fn *Node) AsFileNode() *Node { return fn } +// FileRoot returns the Root node as a [Tree]. 
+func (fn *Node) FileRoot() *Tree { + return AsTree(fn.Root) +} + func (fn *Node) Init() { fn.Tree.Init() fn.IconOpen = icons.FolderOpen @@ -78,22 +79,29 @@ func (fn *Node) Init() { fn.AddContextMenu(fn.contextMenu) fn.Styler(func(s *styles.Style) { status := fn.Info.VCS + hex := "" switch { case status == vcs.Untracked: - s.Color = errors.Must1(gradient.FromString("#808080")) + hex = "#808080" case status == vcs.Modified: - s.Color = errors.Must1(gradient.FromString("#4b7fd1")) + hex = "#4b7fd1" case status == vcs.Added: - s.Color = errors.Must1(gradient.FromString("#008800")) + hex = "#008800" case status == vcs.Deleted: - s.Color = errors.Must1(gradient.FromString("#ff4252")) + hex = "#ff4252" case status == vcs.Conflicted: - s.Color = errors.Must1(gradient.FromString("#ce8020")) + hex = "#ce8020" case status == vcs.Updated: - s.Color = errors.Must1(gradient.FromString("#008060")) + hex = "#008060" case status == vcs.Stored: s.Color = colors.Scheme.OnSurface } + if fn.Info.Generated { + hex = "#8080C0" + } + if hex != "" { + s.Color = colors.Uniform(colors.ToBase(errors.Must1(colors.FromHex(hex)))) + } }) fn.On(events.KeyChord, func(e events.Event) { if core.DebugSettings.KeyEventTrace { @@ -189,14 +197,14 @@ func (fn *Node) Init() { return } if fn.Name == externalFilesName { - files := fn.FileRoot.externalFiles + files := fn.FileRoot().externalFiles for _, fi := range files { tree.AddNew(p, fi, func() Filer { - return tree.NewOfType(fn.FileRoot.FileNodeType).(Filer) + return tree.NewOfType(fn.FileRoot().FileNodeType).(Filer) }, func(wf Filer) { w := wf.AsFileNode() + w.Root = fn.Root w.NeedsLayout() - w.FileRoot = fn.FileRoot w.Filepath = core.Filename(fi) w.Info.Mode = os.ModeIrregular w.Info.VCS = vcs.Stored @@ -207,25 +215,25 @@ func (fn *Node) Init() { if !fn.IsDir() || fn.IsIrregular() { return } - if !((fn.FileRoot.inOpenAll && !fn.Info.IsHidden()) || fn.FileRoot.isDirOpen(fn.Filepath)) { + if !((fn.FileRoot().inOpenAll && !fn.Info.IsHidden()) || fn.FileRoot().isDirOpen(fn.Filepath)) { return } repo, _ := fn.Repo() files := fn.dirFileList() for _, fi := range files { fpath := filepath.Join(string(fn.Filepath), fi.Name()) - if fn.FileRoot.FilterFunc != nil && !fn.FileRoot.FilterFunc(fpath, fi) { + if fn.FileRoot().FilterFunc != nil && !fn.FileRoot().FilterFunc(fpath, fi) { continue } tree.AddNew(p, fi.Name(), func() Filer { - return tree.NewOfType(fn.FileRoot.FileNodeType).(Filer) + return tree.NewOfType(fn.FileRoot().FileNodeType).(Filer) }, func(wf Filer) { w := wf.AsFileNode() + w.Root = fn.Root w.NeedsLayout() - w.FileRoot = fn.FileRoot w.Filepath = core.Filename(fpath) w.This.(Filer).GetFileInfo() - if w.FileRoot.FS == nil { + if w.FileRoot().FS == nil { if w.IsDir() && repo == nil { w.detectVCSRepo(true) // update files } @@ -284,10 +292,10 @@ func (fn *Node) isAutoSave() bool { // RelativePath returns the relative path from root for this node func (fn *Node) RelativePath() string { - if fn.IsIrregular() || fn.FileRoot == nil { + if fn.IsIrregular() || fn.FileRoot() == nil { return fn.Name } - return fsx.RelativeFilePath(string(fn.Filepath), string(fn.FileRoot.Filepath)) + return fsx.RelativeFilePath(string(fn.Filepath), string(fn.FileRoot().Filepath)) } // dirFileList returns the list of files in this directory, @@ -297,14 +305,16 @@ func (fn *Node) dirFileList() []fs.FileInfo { var files []fs.FileInfo var dirs []fs.FileInfo // for DirsOnTop mode var di []fs.DirEntry - if fn.FileRoot.FS == nil { + isFS := false + if fn.FileRoot().FS == nil { di = 
errors.Log1(os.ReadDir(path)) } else { - di = errors.Log1(fs.ReadDir(fn.FileRoot.FS, path)) + isFS = true + di = errors.Log1(fs.ReadDir(fn.FileRoot().FS, path)) } for _, d := range di { info := errors.Log1(d.Info()) - if fn.FileRoot.DirsOnTop { + if fn.FileRoot().DirsOnTop { if d.IsDir() { dirs = append(dirs, info) } else { @@ -314,30 +324,35 @@ func (fn *Node) dirFileList() []fs.FileInfo { files = append(files, info) } } - doModSort := fn.FileRoot.SortByModTime + doModSort := fn.FileRoot().SortByModTime if doModSort { - doModSort = !fn.FileRoot.dirSortByName(core.Filename(path)) + doModSort = !fn.FileRoot().dirSortByName(core.Filename(path)) } else { - doModSort = fn.FileRoot.dirSortByModTime(core.Filename(path)) + doModSort = fn.FileRoot().dirSortByModTime(core.Filename(path)) } - if fn.FileRoot.DirsOnTop { + if fn.FileRoot().DirsOnTop { if doModSort { - sortByModTime(dirs) - sortByModTime(files) + sortByModTime(dirs, isFS) // note: FS = ascending, otherwise descending + sortByModTime(files, isFS) } files = append(dirs, files...) } else { if doModSort { - sortByModTime(files) + sortByModTime(files, isFS) } } return files } -func sortByModTime(files []fs.FileInfo) { +// sortByModTime sorts by _reverse_ mod time (newest first) +func sortByModTime(files []fs.FileInfo, ascending bool) { slices.SortFunc(files, func(a, b fs.FileInfo) int { - return a.ModTime().Compare(b.ModTime()) + if ascending { + return a.ModTime().Compare(b.ModTime()) + } else { + return b.ModTime().Compare(a.ModTime()) + } }) } @@ -371,7 +386,7 @@ func (fn *Node) InitFileInfo() error { return nil } var err error - if fn.FileRoot.FS == nil { // deal with symlinks + if fn.FileRoot().FS == nil { // deal with symlinks ls, err := os.Lstat(string(fn.Filepath)) if errors.Log(err) != nil { return err @@ -387,7 +402,7 @@ func (fn *Node) InitFileInfo() error { } err = fn.Info.InitFile(string(fn.Filepath)) } else { - err = fn.Info.InitFileFS(fn.FileRoot.FS, string(fn.Filepath)) + err = fn.Info.InitFileFS(fn.FileRoot().FS, string(fn.Filepath)) } if err != nil { emsg := fmt.Errorf("filetree.Node InitFileInfo Path %q: Error: %v", fn.Filepath, err) @@ -431,7 +446,7 @@ func (fn *Node) OnClose() { if !fn.IsDir() { return } - fn.FileRoot.setDirClosed(fn.Filepath) + fn.FileRoot().setDirClosed(fn.Filepath) } func (fn *Node) CanOpen() bool { @@ -443,7 +458,7 @@ func (fn *Node) openDir() { if !fn.IsDir() { return } - fn.FileRoot.setDirOpen(fn.Filepath) + fn.FileRoot().setDirOpen(fn.Filepath) fn.Update() } @@ -458,15 +473,15 @@ func (fn *Node) sortBys(modTime bool) { //types:add // sortBy determines how to sort the files in the directory -- default is alpha by name, // optionally can be sorted by modification time. func (fn *Node) sortBy(modTime bool) { - fn.FileRoot.setDirSortBy(fn.Filepath, modTime) + fn.FileRoot().setDirSortBy(fn.Filepath, modTime) fn.Update() } // openAll opens all directories under this one func (fn *Node) openAll() { //types:add - fn.FileRoot.inOpenAll = true // causes chaining of opening + fn.FileRoot().inOpenAll = true // causes chaining of opening fn.Tree.OpenAll() - fn.FileRoot.inOpenAll = false + fn.FileRoot().inOpenAll = false } // OpenBuf opens the file in its buffer if it is not already open. 
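The new tree-level `SortByModTime` option interacts with the per-directory sort flags above: the tree default applies unless a directory explicitly selects sort-by-name (or, when the default is off, sort-by-modification-time). A minimal usage sketch, assuming the standard `OpenPath` entry point:

```go
package main

import (
	"cogentcore.org/core/core"
	"cogentcore.org/core/filetree"
)

func main() {
	b := core.NewBody("File Tree")
	// Sort all directories by modification time by default; individual
	// directories can still override this via their sort context menu.
	ft := filetree.NewTree(b).SetSortByModTime(true)
	ft.OpenPath(".")
	b.RunMainWindow()
}
```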
@@ -499,7 +514,7 @@ func (fn *Node) removeFromExterns() { //types:add if !sn.isExternal() { return } - sn.FileRoot.removeExternalFile(string(sn.Filepath)) + sn.FileRoot().removeExternalFile(string(sn.Filepath)) sn.closeBuf() sn.Delete() }) diff --git a/filetree/search.go index 911b646139..7c3c5ae23e 100644 --- a/filetree/search.go +++ b/filetree/search.go @@ -77,7 +77,7 @@ func Search(start *Node, find string, ignoreCase, regExp bool, loc FindLocation, // fmt.Printf("dir: %v closed\n", sfn.FPath) return tree.Break // don't go down into closed directories! } - if sfn.IsDir() || sfn.IsExec() || sfn.Info.Kind == "octet-stream" || sfn.isAutoSave() { + if sfn.IsDir() || sfn.IsExec() || sfn.Info.Kind == "octet-stream" || sfn.isAutoSave() || sfn.Info.Generated { // fmt.Printf("dir: %v opened\n", sfn.Nm) return tree.Continue } @@ -163,6 +163,9 @@ func findAll(start *Node, find string, ignoreCase, regExp bool, langs []fileinfo if strings.HasSuffix(info.Name(), ".code") { // exclude self return nil } + if fileinfo.IsGeneratedFile(path) { + return nil + } if len(langs) > 0 { mtyp, _, err := fileinfo.MimeFromFile(path) if err != nil { diff --git a/filetree/tree.go index f0d561782c..93919f080d 100644 --- a/filetree/tree.go +++ b/filetree/tree.go @@ -26,6 +26,20 @@ const ( externalFilesName = "[external files]" ) +// Treer is an interface implemented by the root [Tree] node, allowing [AsTree] to return the root of any node as a [Tree]. +type Treer interface { + AsFileTree() *Tree +} + +// AsTree returns the given value as a [Tree] if it has +// an AsFileTree() method, or nil otherwise. +func AsTree(n tree.Node) *Tree { + if t, ok := n.(Treer); ok { + return t.AsFileTree() + } + return nil +} + // Tree is the root widget of a file tree representing files in a given directory // (and subdirectories thereof), and has some overall management state for how to // view things. @@ -80,16 +94,19 @@ type Tree struct { func (ft *Tree) Init() { ft.Node.Init() - ft.FileRoot = ft + ft.Root = ft ft.FileNodeType = types.For[Node]() ft.OpenDepth = 4 ft.DirsOnTop = true ft.FirstMaker(func(p *tree.Plan) { + if len(ft.externalFiles) == 0 { + return + } tree.AddNew(p, externalFilesName, func() Filer { return tree.NewOfType(ft.FileNodeType).(Filer) }, func(wf Filer) { w := wf.AsFileNode() - w.FileRoot = ft + w.Root = ft.Root w.Filepath = externalFilesName w.Info.Mode = os.ModeDir w.Info.VCS = vcs.Stored @@ -110,6 +127,10 @@ func (fv *Tree) Destroy() { fv.Tree.Destroy() } +func (ft *Tree) AsFileTree() *Tree { + return ft +} + // OpenPath opens the filetree at the given os file system directory path. // It reads all the files at the given path into this tree. // Only paths listed in [Tree.Dirs] will be opened. @@ -152,7 +173,7 @@ func (ft *Tree) OpenPathFS(fsys fs.FS, path string) *Tree { } // UpdatePath updates the tree at the directory level for given path -// and everything below it. It flags that it needs render update, +// and everything below it. It flags that it needs render update, // but if a deletion or insertion happened, then NeedsLayout should also // be called.
func (ft *Tree) UpdatePath(path string) { @@ -334,8 +355,13 @@ func (ft *Tree) AddExternalFile(fpath string) (*Node, error) { if has, _ := ft.hasExternalFile(pth); has { return ft.externalNodeByPath(pth) } + newExt := len(ft.externalFiles) == 0 ft.externalFiles = append(ft.externalFiles, pth) - ft.Child(0).(Filer).AsFileNode().Update() + if newExt { + ft.Update() + } else { + ft.Child(0).(Filer).AsFileNode().Update() + } return ft.externalNodeByPath(pth) } diff --git a/filetree/typegen.go b/filetree/typegen.go index 376b56a57c..547cb5d1fb 100644 --- a/filetree/typegen.go +++ b/filetree/typegen.go @@ -3,14 +3,16 @@ package filetree import ( + "io/fs" + "cogentcore.org/core/base/vcs" "cogentcore.org/core/tree" "cogentcore.org/core/types" ) -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/filetree.Filer", IDName: "filer", Doc: "Filer is an interface for file tree file actions that all [Node]s satisfy.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Methods: []types.Method{{Name: "AsFileNode", Doc: "AsFileNode returns the [Node]", Returns: []string{"Node"}}, {Name: "RenameFiles", Doc: "RenameFiles renames any selected files."}}}) +var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/filetree.Filer", IDName: "filer", Doc: "Filer is an interface for file tree file actions that all [Node]s satisfy.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Methods: []types.Method{{Name: "AsFileNode", Doc: "AsFileNode returns the [Node]", Returns: []string{"Node"}}, {Name: "RenameFiles", Doc: "RenameFiles renames any selected files."}, {Name: "GetFileInfo", Doc: "GetFileInfo updates the .Info for this file", Returns: []string{"error"}}, {Name: "OpenFile", Doc: "OpenFile opens the file for node. This is called by OpenFilesDefault", Returns: []string{"error"}}}}) -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/filetree.Node", IDName: "node", Doc: "Node represents a file in the file system, as a [core.Tree] node.\nThe name of the node is the name of the file.\nFolders have children containing further nodes.", Directives: []types.Directive{{Tool: "core", Directive: "embedder"}}, Methods: []types.Method{{Name: "Cut", Doc: "Cut copies the selected files to the clipboard and then deletes them.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "Paste", Doc: "Paste inserts files from the clipboard.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "OpenFilesDefault", Doc: "OpenFilesDefault opens selected files with default app for that file type (os defined).\nruns open on Mac, xdg-open on Linux, and start on Windows", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "duplicateFiles", Doc: "duplicateFiles makes a copy of selected files", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "deleteFiles", Doc: "deletes any selected files or directories. 
If any directory is selected,\nall files and subdirectories in that directory are also deleted.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "RenameFiles", Doc: "renames any selected files", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "RenameFile", Doc: "RenameFile renames file to new name", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"newpath"}, Returns: []string{"error"}}, {Name: "newFiles", Doc: "newFiles makes a new file in selected directory", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"filename", "addToVCS"}}, {Name: "newFile", Doc: "newFile makes a new file in this directory node", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"filename", "addToVCS"}}, {Name: "newFolders", Doc: "makes a new folder in the given selected directory", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"foldername"}}, {Name: "newFolder", Doc: "newFolder makes a new folder (directory) in this directory node", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"foldername"}}, {Name: "showFileInfo", Doc: "Shows file information about selected file(s)", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "sortBys", Doc: "sortBys determines how to sort the selected files in the directory.\nDefault is alpha by name, optionally can be sorted by modification time.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"modTime"}}, {Name: "openAll", Doc: "openAll opens all directories under this one", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "removeFromExterns", Doc: "removeFromExterns removes file from list of external files", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "addToVCSSelected", Doc: "addToVCSSelected adds selected files to version control system", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "deleteFromVCSSelected", Doc: "deleteFromVCSSelected removes selected files from version control system", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "commitToVCSSelected", Doc: "commitToVCSSelected commits to version control system based on last selected file", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "revertVCSSelected", Doc: "revertVCSSelected removes selected files from version control system", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "diffVCSSelected", Doc: "diffVCSSelected shows the diffs between two versions of selected files, given by the\nrevision specifiers -- if empty, defaults to A = current HEAD, B = current WC file.\n-1, -2 etc also work as universal ways of specifying prior revisions.\nDiffs are shown in a DiffEditorDialog.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"rev_a", "rev_b"}}, {Name: "logVCSSelected", Doc: "logVCSSelected shows the VCS log of commits for selected files.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "blameVCSSelected", Doc: "blameVCSSelected shows the VCS blame report for this file, reporting for each line\nthe revision and author of the last change.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}}, Embeds: []types.Field{{Name: "Tree"}}, Fields: []types.Field{{Name: "Filepath", Doc: "Filepath is the full path to this file."}, 
{Name: "Info", Doc: "Info is the full standard file info about this file."}, {Name: "Buffer", Doc: "Buffer is the file buffer for editing this file."}, {Name: "FileRoot", Doc: "FileRoot is the root [Tree] of the tree, which has global state."}, {Name: "DirRepo", Doc: "DirRepo is the version control system repository for this directory,\nonly non-nil if this is the highest-level directory in the tree under vcs control."}, {Name: "repoFiles", Doc: "repoFiles has the version control system repository file status,\nproviding a much faster way to get file status, vs. the repo.Status\ncall which is exceptionally slow."}}}) +var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/filetree.Node", IDName: "node", Doc: "Node represents a file in the file system, as a [core.Tree] node.\nThe name of the node is the name of the file.\nFolders have children containing further nodes.", Directives: []types.Directive{{Tool: "core", Directive: "embedder"}}, Methods: []types.Method{{Name: "Cut", Doc: "Cut copies the selected files to the clipboard and then deletes them.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "Paste", Doc: "Paste inserts files from the clipboard.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "OpenFilesDefault", Doc: "OpenFilesDefault opens selected files with default app for that file type (os defined).\nruns open on Mac, xdg-open on Linux, and start on Windows", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "duplicateFiles", Doc: "duplicateFiles makes a copy of selected files", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "deleteFiles", Doc: "deletes any selected files or directories. If any directory is selected,\nall files and subdirectories in that directory are also deleted.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "RenameFiles", Doc: "renames any selected files", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "RenameFile", Doc: "RenameFile renames file to new name", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"newpath"}, Returns: []string{"error"}}, {Name: "newFiles", Doc: "newFiles makes a new file in selected directory", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"filename", "addToVCS"}}, {Name: "newFile", Doc: "newFile makes a new file in this directory node", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"filename", "addToVCS"}}, {Name: "newFolders", Doc: "makes a new folder in the given selected directory", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"foldername"}}, {Name: "newFolder", Doc: "newFolder makes a new folder (directory) in this directory node", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"foldername"}}, {Name: "showFileInfo", Doc: "Shows file information about selected file(s)", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "sortBys", Doc: "sortBys determines how to sort the selected files in the directory.\nDefault is alpha by name, optionally can be sorted by modification time.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"modTime"}}, {Name: "openAll", Doc: "openAll opens all directories under this one", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "removeFromExterns", Doc: "removeFromExterns removes file 
from list of external files", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "addToVCSSelected", Doc: "addToVCSSelected adds selected files to version control system", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "deleteFromVCSSelected", Doc: "deleteFromVCSSelected removes selected files from version control system", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "commitToVCSSelected", Doc: "commitToVCSSelected commits to version control system based on last selected file", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "revertVCSSelected", Doc: "revertVCSSelected removes selected files from version control system", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "diffVCSSelected", Doc: "diffVCSSelected shows the diffs between two versions of selected files, given by the\nrevision specifiers -- if empty, defaults to A = current HEAD, B = current WC file.\n-1, -2 etc also work as universal ways of specifying prior revisions.\nDiffs are shown in a DiffEditorDialog.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"rev_a", "rev_b"}}, {Name: "logVCSSelected", Doc: "logVCSSelected shows the VCS log of commits for selected files.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "blameVCSSelected", Doc: "blameVCSSelected shows the VCS blame report for this file, reporting for each line\nthe revision and author of the last change.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}}, Embeds: []types.Field{{Name: "Tree"}}, Fields: []types.Field{{Name: "Filepath", Doc: "Filepath is the full path to this file."}, {Name: "Info", Doc: "Info is the full standard file info about this file."}, {Name: "Buffer", Doc: "Buffer is the file buffer for editing this file."}, {Name: "DirRepo", Doc: "DirRepo is the version control system repository for this directory,\nonly non-nil if this is the highest-level directory in the tree under vcs control."}, {Name: "repoFiles", Doc: "repoFiles has the version control system repository file status,\nproviding a much faster way to get file status, vs. the repo.Status\ncall which is exceptionally slow."}}}) // NewNode returns a new [Node] with the given optional parent: // Node represents a file in the file system, as a [core.Tree] node. @@ -35,7 +37,7 @@ func AsNode(n tree.Node) *Node { // AsNode satisfies the [NodeEmbedder] interface func (t *Node) AsNode() *Node { return t } -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/filetree.Tree", IDName: "tree", Doc: "Tree is the root widget of a file tree representing files in a given directory\n(and subdirectories thereof), and has some overall management state for how to\nview things.", Embeds: []types.Field{{Name: "Node"}}, Fields: []types.Field{{Name: "externalFiles", Doc: "externalFiles are external files outside the root path of the tree.\nThey are stored in terms of their absolute paths. 
These are shown\nin the first sub-node if present; use [Tree.AddExternalFile] to add one."}, {Name: "Dirs", Doc: "records state of directories within the tree (encoded using paths relative to root),\ne.g., open (have been opened by the user) -- can persist this to restore prior view of a tree"}, {Name: "DirsOnTop", Doc: "if true, then all directories are placed at the top of the tree.\nOtherwise everything is mixed."}, {Name: "FileNodeType", Doc: "type of node to create; defaults to [Node] but can use custom node types"}, {Name: "inOpenAll", Doc: "if true, we are in midst of an OpenAll call; nodes should open all dirs"}, {Name: "watcher", Doc: "change notify for all dirs"}, {Name: "doneWatcher", Doc: "channel to close watcher watcher"}, {Name: "watchedPaths", Doc: "map of paths that have been added to watcher; only active if bool = true"}, {Name: "lastWatchUpdate", Doc: "last path updated by watcher"}, {Name: "lastWatchTime", Doc: "timestamp of last update"}}}) +var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/filetree.Tree", IDName: "tree", Doc: "Tree is the root widget of a file tree representing files in a given directory\n(and subdirectories thereof), and has some overall management state for how to\nview things.", Embeds: []types.Field{{Name: "Node"}}, Fields: []types.Field{{Name: "externalFiles", Doc: "externalFiles are external files outside the root path of the tree.\nThey are stored in terms of their absolute paths. These are shown\nin the first sub-node if present; use [Tree.AddExternalFile] to add one."}, {Name: "Dirs", Doc: "Dirs records state of directories within the tree (encoded using paths relative to root),\ne.g., open (have been opened by the user) -- can persist this to restore prior view of a tree"}, {Name: "DirsOnTop", Doc: "DirsOnTop indicates whether all directories are placed at the top of the tree.\nOtherwise everything is mixed. This is the default."}, {Name: "SortByModTime", Doc: "SortByModTime causes files to be sorted by modification time by default.\nOtherwise it is a per-directory option."}, {Name: "FileNodeType", Doc: "FileNodeType is the type of node to create; defaults to [Node] but can use custom node types"}, {Name: "FilterFunc", Doc: "FilterFunc, if set, determines whether to include the given node in the tree.\nreturn true to include, false to not. This applies to files and directories alike."}, {Name: "FS", Doc: "FS is the file system we are browsing, if it is an FS (nil = os filesystem)"}, {Name: "inOpenAll", Doc: "inOpenAll indicates whether we are in midst of an OpenAll call; nodes should open all dirs."}, {Name: "watcher", Doc: "watcher does change notify for all dirs"}, {Name: "doneWatcher", Doc: "doneWatcher is channel to close watcher watcher"}, {Name: "watchedPaths", Doc: "watchedPaths is map of paths that have been added to watcher; only active if bool = true"}, {Name: "lastWatchUpdate", Doc: "lastWatchUpdate is last path updated by watcher"}, {Name: "lastWatchTime", Doc: "lastWatchTime is timestamp of last update"}}}) // NewTree returns a new [Tree] with the given optional parent: // Tree is the root widget of a file tree representing files in a given directory @@ -44,14 +46,31 @@ var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/filetree.Tree", IDN func NewTree(parent ...tree.Node) *Tree { return tree.New[Tree](parent...) } // SetDirsOnTop sets the [Tree.DirsOnTop]: -// if true, then all directories are placed at the top of the tree. -// Otherwise everything is mixed. 
+// DirsOnTop indicates whether all directories are placed at the top of the tree. +// Otherwise everything is mixed. This is the default. func (t *Tree) SetDirsOnTop(v bool) *Tree { t.DirsOnTop = v; return t } +// SetSortByModTime sets the [Tree.SortByModTime]: +// SortByModTime causes files to be sorted by modification time by default. +// Otherwise it is a per-directory option. +func (t *Tree) SetSortByModTime(v bool) *Tree { t.SortByModTime = v; return t } + // SetFileNodeType sets the [Tree.FileNodeType]: -// type of node to create; defaults to [Node] but can use custom node types +// FileNodeType is the type of node to create; defaults to [Node] but can use custom node types func (t *Tree) SetFileNodeType(v *types.Type) *Tree { t.FileNodeType = v; return t } +// SetFilterFunc sets the [Tree.FilterFunc]: +// FilterFunc, if set, determines whether to include the given node in the tree. +// return true to include, false to not. This applies to files and directories alike. +func (t *Tree) SetFilterFunc(v func(path string, info fs.FileInfo) bool) *Tree { + t.FilterFunc = v + return t +} + +// SetFS sets the [Tree.FS]: +// FS is the file system we are browsing, if it is an FS (nil = os filesystem) +func (t *Tree) SetFS(v fs.FS) *Tree { t.FS = v; return t } + var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/filetree.VCSLog", IDName: "vcs-log", Doc: "VCSLog is a widget that represents VCS log data.", Embeds: []types.Field{{Name: "Frame"}}, Fields: []types.Field{{Name: "Log", Doc: "current log"}, {Name: "File", Doc: "file that this is a log of -- if blank then it is entire repository"}, {Name: "Since", Doc: "date expression for how long ago to include log entries from"}, {Name: "Repo", Doc: "version control system repository"}, {Name: "revisionA", Doc: "revision A -- defaults to HEAD"}, {Name: "revisionB", Doc: "revision B -- blank means current working copy"}, {Name: "setA", Doc: "double-click will set the A revision -- else B"}, {Name: "arev"}, {Name: "brev"}, {Name: "atf"}, {Name: "btf"}}}) // NewVCSLog returns a new [VCSLog] with the given optional parent: diff --git a/filetree/vcs.go b/filetree/vcs.go index 6e9b1fc5cb..db9a72e938 100644 --- a/filetree/vcs.go +++ b/filetree/vcs.go @@ -22,7 +22,7 @@ import ( // FirstVCS returns the first VCS repository starting from this node and going down. // also returns the node having that repository func (fn *Node) FirstVCS() (vcs.Repo, *Node) { - if fn.FileRoot.FS != nil { + if fn.FileRoot().FS != nil { return nil, nil } var repo vcs.Repo @@ -73,7 +73,11 @@ func (fn *Node) detectVCSRepo(updateFiles bool) bool { // and the node for the directory where the repo is based. // Goes up the tree until a repository is found. func (fn *Node) Repo() (vcs.Repo, *Node) { - if fn.isExternal() || fn.FileRoot.FS != nil { + fr := fn.FileRoot() + if fr == nil { + return nil, nil + } + if fn.isExternal() || fr == nil || fr.FS != nil { return nil, nil } if fn.DirRepo != nil { diff --git a/gpu/README.md b/gpu/README.md index 53000fbb83..fae96005a9 100644 --- a/gpu/README.md +++ b/gpu/README.md @@ -152,13 +152,15 @@ Here's how it works: * Each WebGPU `Pipeline` holds **1** compute `shader` program, which is equivalent to a `kernel` in CUDA. This is the basic unit of computation, accomplishing one parallel sweep of processing across some number of identical data structures. -* You must organize at the outset your `Vars` and `Values` in the `System` to hold the data structures your shaders operate on. 
In general, you want to have a single static set of Vars that cover everything you'll need, and different shaders can operate on different subsets of these. You want to minimize the amount of memory transfer. +* The `Vars` and `Values` in the `System` hold all the data structures your shaders operate on, and must be configured and data uploaded before running. In general, it is best to have a single static set of Vars that cover everything you'll need, and different shaders can operate on different subsets of these, minimizing the amount of memory transfer. -* Because the `Queue.Submit` call is by far the most expensive call in WebGPU, you want to minimize those. This means you want to combine as much of your computation into one big Command sequence, with calls to various different `Pipeline` shaders (which can all be put in one command buffer) that gets submitted *once*, rather than submitting separate commands for each shader. Ideally this also involves combining memory transfers to / from the GPU in the same command buffer as well. +* Because the `Queue.Submit` call is by far the most expensive call in WebGPU, it should be minimized. This means combining as much of your computation into one big Command sequence, with calls to various different `Pipeline` shaders (which can all be put in one command buffer) that gets submitted *once*, rather than submitting separate commands for each shader. Ideally this also involves combining memory transfers to / from the GPU in the same command buffer as well. -* There are no explicit sync mechanisms in WebGPU, but it is designed so that shader compute is automatically properly synced with prior and subsequent memory transfer commands, so it automatically does the right thing for most use cases. +* There are no explicit sync mechanisms on the command (CPU) side of WebGPU (they only exist in the WGSL shaders), but it is designed so that shader compute is automatically properly synced with prior and subsequent memory transfer commands, so it automatically does the right thing for most use cases. -* Compute is particularly taxing on memory transfer in general, and as far as I can tell, the best strategy is to rely on the optimized `WriteBuffer` command to transfer from CPU to GPU, and then use a staging buffer to read data back from the GPU. E.g., see [this reddit post](https://www.reddit.com/r/wgpu/comments/13zqe1u/can_someone_please_explain_to_me_the_whole_buffer/). Critically, the write commands are queued and any staging buffers are managed internally, so it shouldn't be much slower than manually doing all the staging. +* Compute is particularly taxing on memory transfer in general, and overall the best strategy is to rely on the optimized `WriteBuffer` command to transfer from CPU to GPU, and then use a staging buffer to read data back from the GPU. E.g., see [this reddit post](https://www.reddit.com/r/wgpu/comments/13zqe1u/can_someone_please_explain_to_me_the_whole_buffer/). Critically, the write commands are queued and any staging buffers are managed internally, so it shouldn't be much slower than manually doing all the staging.
For reading, we have to implement everything ourselves, and here it is critical to batch the `ReadSync` calls for all relevant values, so they all happen at once. Use ad-hoc `ValueGroup`s to organize these batched read operations efficiently for the different groups of values that need to be read back in the different compute stages. + +* For large numbers of items to compute, there is a strong constraint that only 65_536 (2^16) workgroups can be submitted, _per dimension_ at a time. For unstructured 1D indexing, we typically use `[64,1,1]` for the workgroup size (which must be hard-coded into the shader and coordinated with the Go side code), which gives 64 * 65_536 = 4_194_304 max items. For more than that number, more than 1 needs to be used for the second dimension. The NumWorkgroups* functions return appropriate sizes with a minimum remainder. See [examples/compute](examples/compute) for the logic needed to get the overall global index from the workgroup sizes. # Gamma Correction (sRGB vs Linear) and Headless / Offscreen Rendering diff --git a/gpu/compute.go b/gpu/compute.go index da7ee8213b..5bc4e18dc0 100644 --- a/gpu/compute.go +++ b/gpu/compute.go @@ -7,6 +7,8 @@ package gpu import ( "fmt" "math" + "runtime" + "sync" "cogentcore.org/core/base/errors" "github.com/cogentcore/webgpu/wgpu" @@ -25,9 +27,13 @@ type ComputeSystem struct { // Access through the System.Vars() method. vars Vars - // ComputePipelines by name + // ComputePipelines by name. ComputePipelines map[string]*ComputePipeline + // ComputeEncoder is the compute specific command encoder for the + // current [BeginComputePass], and released in [EndComputePass]. + ComputeEncoder *wgpu.ComputePassEncoder + // CommandEncoder is the command encoder created in // [BeginComputePass], and released in [EndComputePass]. CommandEncoder *wgpu.CommandEncoder @@ -116,22 +122,32 @@ func (sy *ComputeSystem) NewCommandEncoder() (*wgpu.CommandEncoder, error) { // to start the compute pass, returning the encoder object // to which further compute commands should be added. // Call [EndComputePass] when done. +// If an existing [ComputeSystem.ComputeEncoder] is already set from +// a prior BeginComputePass call, then that is returned, so this +// is safe and efficient to call for every compute shader dispatch, +// where the first call will create and the rest add to the ongoing job. func (sy *ComputeSystem) BeginComputePass() (*wgpu.ComputePassEncoder, error) { + if sy.ComputeEncoder != nil { + return sy.ComputeEncoder, nil + } cmd, err := sy.NewCommandEncoder() if errors.Log(err) != nil { return nil, err } sy.CommandEncoder = cmd - return cmd.BeginComputePass(nil), nil // note: optional name in the descriptor + sy.ComputeEncoder = cmd.BeginComputePass(nil) // optional name in the encoder + return sy.ComputeEncoder, nil } // EndComputePass submits the current compute commands to the device -// Queue and releases the [CommandEncoder] and the given -// ComputePassEncoder. You must call ce.End prior to calling this. +// Queue and releases the [ComputeSystem.CommandEncoder] and +// [ComputeSystem.ComputeEncoder]. You must call ce.End prior to calling this. // Can insert other commands after ce.End, e.g., to copy data back // from the GPU, prior to calling EndComputePass. 
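A minimal sketch of the call pattern these changes enable: repeated dispatches accumulate into the single cached pass/command encoder and are submitted once in EndComputePass. The `runOnePass` helper, the variable-binding step, and the workgroup size of 64 are illustrative assumptions, not part of the diff:

```go
package example

import "cogentcore.org/core/gpu"

// runOnePass is a hypothetical helper showing the intended usage pattern.
func runOnePass(sy *gpu.ComputeSystem, pl *gpu.ComputePipeline, n int) error {
	ce, err := sy.BeginComputePass() // returns the already-open pass if one exists
	if err != nil {
		return err
	}
	// ... bind the current variable values to the pass here ...
	if err := pl.Dispatch1D(ce, n, 64); err != nil { // 64 threads per workgroup, matching the shader
		return err
	}
	ce.End() // must be called before EndComputePass
	// additional commands (e.g., buffer copies for readback) can be added
	// to sy.CommandEncoder here, before the single submit:
	return sy.EndComputePass() // one Queue.Submit for the entire pass
}
```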
-func (sy *ComputeSystem) EndComputePass(ce *wgpu.ComputePassEncoder) error { +func (sy *ComputeSystem) EndComputePass() error { + ce := sy.ComputeEncoder cmd := sy.CommandEncoder + sy.ComputeEncoder = nil sy.CommandEncoder = nil ce.Release() // must happen before Finish cmdBuffer, err := cmd.Finish(nil) @@ -144,11 +160,47 @@ func (sy *ComputeSystem) EndComputePass(ce *wgpu.ComputePassEncoder) error { return nil } -// Warps returns the number of warps (work goups of compute threads) -// that is sufficient to compute n elements, given specified number -// of threads per this dimension. -// It just rounds up to nearest even multiple of n divided by threads: -// Ceil(n / threads) -func Warps(n, threads int) int { - return int(math.Ceil(float64(n) / float64(threads))) +// NumThreads is the number of threads to use for parallel threading, +// in the [VectorizeFunc] that is used for CPU versions of GPU functions. +// The default of 0 causes the [runtime.GOMAXPROCS] to be used. +var NumThreads = 0 + +// DefaultNumThreads returns the default number of threads to use: +// NumThreads if non-zero, otherwise [runtime.GOMAXPROCS]. +func DefaultNumThreads() int { + if NumThreads > 0 { + return NumThreads + } + return runtime.GOMAXPROCS(0) +} + +// VectorizeFunc runs given GPU kernel function taking a uint32 index +// on the CPU, using given number of threads with goroutines, for n iterations. +// If threads is 0, then GOMAXPROCS is used. +func VectorizeFunc(threads, n int, fun func(idx uint32)) { + if threads == 0 { + threads = DefaultNumThreads() + } + if threads <= 1 { + for idx := range n { + fun(uint32(idx)) + } + return + } + nper := int(math.Ceil(float64(n) / float64(threads))) + wait := sync.WaitGroup{} + for start := 0; start < n; start += nper { + end := start + nper + if end > n { + end = n + } + wait.Add(1) + go func() { + for idx := start; idx < end; idx++ { + fun(uint32(idx)) + } + wait.Done() + }() + } + wait.Wait() } diff --git a/gpu/compute_test.go b/gpu/compute_test.go new file mode 100644 index 0000000000..d4d4cec960 --- /dev/null +++ b/gpu/compute_test.go @@ -0,0 +1,58 @@ +// Copyright (c) 2024, Cogent Core. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
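A short usage sketch for the new VectorizeFunc CPU fallback: the same index-based kernel body that would run as a WGSL shader can be run across goroutines on the CPU. The data slice and squaring kernel here are illustrative:

```go
package main

import (
	"fmt"

	"cogentcore.org/core/gpu"
)

func main() {
	data := make([]float32, 1_000_000)
	for i := range data {
		data[i] = float32(i)
	}
	// square is the CPU version of a hypothetical squaring kernel.
	square := func(idx uint32) {
		data[idx] *= data[idx]
	}
	gpu.VectorizeFunc(0, len(data), square) // threads = 0 uses GOMAXPROCS
	fmt.Println(data[3])                    // 9
}
```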
+ +package gpu + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNumWorkgroups(t *testing.T) { + nx, ny := NumWorkgroups1D(4_194_304, 64) + assert.Equal(t, 65536, nx) + assert.Equal(t, 1, ny) + assert.Equal(t, 4_194_304, nx*ny*64) + + nx, ny = NumWorkgroups1D(4_194_304+64, 64) + assert.Equal(t, 32769, nx) + assert.Equal(t, 2, ny) + assert.GreaterOrEqual(t, nx*ny*64, 4_194_304+64) + + nx, ny = NumWorkgroups1D(4_194_304+90, 64) + assert.Equal(t, 32769, nx) + assert.Equal(t, 2, ny) + assert.GreaterOrEqual(t, nx*ny*64, 4_194_304+90) + + nx, ny = NumWorkgroups1D(4_194_304+129, 64) + assert.Equal(t, 32770, nx) + assert.Equal(t, 2, ny) + assert.GreaterOrEqual(t, nx*ny*64, 4_194_304+129) + + nx, ny = NumWorkgroups1D(4_194_304-64, 64) + assert.Equal(t, 65535, nx) + assert.Equal(t, 1, ny) + assert.GreaterOrEqual(t, nx*ny*64, 4_194_304-64) + + nx, ny = NumWorkgroups1D(4_194_304-90, 64) + assert.Equal(t, 65535, nx) + assert.Equal(t, 1, ny) + assert.GreaterOrEqual(t, nx*ny*64, 4_194_304-90) + + nx, ny = NumWorkgroups1D(4_194_304*64, 64) + assert.Equal(t, 65536, nx) + assert.Equal(t, 64, ny) + assert.GreaterOrEqual(t, nx*ny*64, 4_194_304*64) + + nx, ny = NumWorkgroups1D(4_194_304*64, 64) + assert.Equal(t, 65536, nx) + assert.Equal(t, 64, ny) + assert.GreaterOrEqual(t, nx*ny*64, 4_194_304*64) + + nx, ny = NumWorkgroups2D(4_194_304*64, 4, 16) + assert.Equal(t, 65536, nx) + assert.Equal(t, 64, ny) + assert.GreaterOrEqual(t, nx*ny*64, 4_194_304*64) +} diff --git a/gpu/cpipeline.go b/gpu/cpipeline.go index 290919c9f6..2c3ef36eb2 100644 --- a/gpu/cpipeline.go +++ b/gpu/cpipeline.go @@ -6,6 +6,7 @@ package gpu import ( "io/fs" + "math" "path" "cogentcore.org/core/base/errors" @@ -43,6 +44,7 @@ func NewComputePipelineShaderFS(fsys fs.FS, fname string, sy *ComputeSystem) *Co sh := pl.AddShader(name) errors.Log(sh.OpenFileFS(fsys, fname)) pl.AddEntry(sh, ComputeShader, "main") + sy.ComputePipelines[pl.Name] = pl return pl } @@ -73,9 +75,45 @@ func (pl *ComputePipeline) Dispatch(ce *wgpu.ComputePassEncoder, nx, ny, nz int) // (X) dimension, for given number *elements* (threads) per warp (typically 64). // See [Dispatch] for full info. // This is just a convenience method for common 1D case that calls -// the Warps method for you. +// the NumWorkgroups1D function with threads for you. func (pl *ComputePipeline) Dispatch1D(ce *wgpu.ComputePassEncoder, n, threads int) error { - return pl.Dispatch(ce, Warps(n, threads), 1, 1) + nx, ny := NumWorkgroups1D(n, threads) + return pl.Dispatch(ce, nx, ny, 1) +} + +// NumWorkgroups1D() returns the number of work groups of compute threads +// that is sufficient to compute n total elements, given specified number +// of threads in the x dimension, subject to constraint that no more than +// 65536 work groups can be deployed per dimension. +func NumWorkgroups1D(n, threads int) (nx, ny int) { + mxn := 65536 + ny = 1 + nx = int(math.Ceil(float64(n) / float64(threads))) + if nx <= 65536 { + return + } + xsz := mxn * threads + ny = int(math.Ceil(float64(n) / float64(xsz))) + nx = int(math.Ceil(float64(n) / float64(ny*threads))) + return +} + +// NumWorkgroups2D() returns the number of work groups of compute threads +// that is sufficient to compute n total elements, given specified number +// of threads per x, y dimension, subject to constraint that no more than +// 65536 work groups can be deployed per dimension. 
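A small, self-contained sketch (assuming only the `gpu` package as changed in this diff) of how `NumWorkgroups1D` sizes a dispatch that exceeds the per-dimension limit, and how the CPU-side mirror of the global index computation used in the examples/compute shader (see below) recovers the item index:

```Go
package main

import (
	"fmt"

	"cogentcore.org/core/gpu"
)

func main() {
	n := 5_000_000 // more than 64 * 65_536, so a second dimension is required
	threads := 64
	nx, ny := gpu.NumWorkgroups1D(n, threads)
	fmt.Println("workgroups:", nx, ny, "capacity:", nx*ny*threads)

	// CPU-side mirror of the global index used in the shader:
	// idx = local + (wgid.x + wgid.y * nwg.x) * workgroup_size
	wgX, wgY, local := 10, 1, 3
	idx := local + (wgX+wgY*nx)*threads
	fmt.Println("global index:", idx)
}
```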
+func NumWorkgroups2D(n, x, y int) (nx, ny int) { + mxn := 65536 + sz := x * y + ny = 1 + nx = int(math.Ceil(float64(n) / float64(sz))) + if nx <= 65536 { + return + } + xsz := mxn * x // size of full x chunk + ny = int(math.Ceil(float64(n) / float64(xsz*y))) + nx = int(math.Ceil(float64(n) / float64(x*ny*y))) + return } // BindAllGroups binds the Current Value for all variables across all diff --git a/gpu/device.go b/gpu/device.go index 2d8a94dfb7..ecc6901e02 100644 --- a/gpu/device.go +++ b/gpu/device.go @@ -5,6 +5,8 @@ package gpu import ( + "fmt" + "cogentcore.org/core/base/errors" "github.com/cogentcore/webgpu/wgpu" ) @@ -38,14 +40,25 @@ func NewDevice(gpu *GPU) (*Device, error) { func NewComputeDevice(gpu *GPU) (*Device, error) { // we only request max buffer sizes so compute can go as big as it needs to limits := wgpu.DefaultLimits() - const maxv = 0xFFFFFFFF + // Per https://github.com/cogentcore/core/issues/1362 -- this may cause issues on "downlevel" + // hardware, so we may need to detect that. OTOH it probably won't be useful for compute anyway, + // but we can just sort that out later + // note: on web / chromium / dawn, limited to 10: https://issues.chromium.org/issues/366151398?pli=1 + limits.MaxStorageBuffersPerShaderStage = gpu.Limits.Limits.MaxStorageBuffersPerShaderStage + // fmt.Println("MaxStorageBuffersPerShaderStage:", gpu.Limits.Limits.MaxStorageBuffersPerShaderStage) // note: these limits are being processed and allow the MaxBufferSize to be the // controlling factor -- if we don't set these, then the slrand example doesn't // work above a smaller limit. - limits.MaxUniformBufferBindingSize = min(gpu.Limits.Limits.MaxUniformBufferBindingSize, maxv) - limits.MaxStorageBufferBindingSize = min(gpu.Limits.Limits.MaxStorageBufferBindingSize, maxv) + limits.MaxUniformBufferBindingSize = uint64(MemSizeAlignDown(int(gpu.Limits.Limits.MaxUniformBufferBindingSize), int(gpu.Limits.Limits.MinUniformBufferOffsetAlignment))) + + limits.MaxStorageBufferBindingSize = uint64(MemSizeAlignDown(int(gpu.Limits.Limits.MaxStorageBufferBindingSize), int(gpu.Limits.Limits.MinStorageBufferOffsetAlignment))) // note: this limit is not working properly: - limits.MaxBufferSize = min(gpu.Limits.Limits.MaxBufferSize, maxv) + limits.MaxBufferSize = uint64(MemSizeAlignDown(int(gpu.Limits.Limits.MaxBufferSize), int(gpu.Limits.Limits.MinStorageBufferOffsetAlignment))) + // limits.MaxBindGroups = gpu.Limits.Limits.MaxBindGroups // note: no point in changing -- web constraint + + if Debug { + fmt.Printf("Requesting sizes: MaxStorageBufferBindingSize: %X MaxBufferSize: %X\n", limits.MaxStorageBufferBindingSize, limits.MaxBufferSize) + } desc := wgpu.DeviceDescriptor{ RequiredLimits: &wgpu.RequiredLimits{ Limits: limits, diff --git a/gpu/examples/compute/compute.go b/gpu/examples/compute/compute.go index 5737215b71..500e578a60 100644 --- a/gpu/examples/compute/compute.go +++ b/gpu/examples/compute/compute.go @@ -11,7 +11,9 @@ import ( "runtime" "unsafe" + "cogentcore.org/core/base/timer" "cogentcore.org/core/gpu" + // "cogentcore.org/core/system/driver/web/jsfs" ) //go:embed squares.wgsl @@ -30,7 +32,19 @@ type Data struct { } func main() { - gpu.Debug = true + // errors.Log1(jsfs.Config(js.Global().Get("fs"))) // needed for printing etc to work + // time.Sleep(1 * time.Second) + // b := core.NewBody() + // bt := core.NewButton(b).SetText("Run Compute") + // bt.OnClick(func(e events.Event) { + compute() + // }) + // b.RunMainWindow() + // select {} +} + +func compute() { + // gpu.SetDebug(true) gp := 
gpu.NewComputeGPU() fmt.Printf("Running on GPU: %s\n", gp.DeviceName) @@ -42,8 +56,11 @@ func main() { vars := sy.Vars() sgp := vars.AddGroup(gpu.Storage) - n := 20 // note: not necc to spec up-front, but easier if so + // n := 16_000_000 // near max capacity on Mac M* + n := 200_000 // should fit in any webgpu threads := 64 + nx, ny := gpu.NumWorkgroups1D(n, threads) + fmt.Printf("workgroup sizes: %d, %d storage mem bytes: %X\n", nx, ny, n*int(unsafe.Sizeof(Data{}))) dv := sgp.AddStruct("Data", int(unsafe.Sizeof(Data{})), n, gpu.ComputeShader) @@ -59,18 +76,28 @@ func main() { } gpu.SetValueFrom(dvl, sd) - sgp.CreateReadBuffers() - - ce, _ := sy.BeginComputePass() - pl.Dispatch1D(ce, n, threads) - ce.End() - dvl.GPUToRead(sy.CommandEncoder) - sy.EndComputePass(ce) + gpuTmr := timer.Time{} + cpyTmr := timer.Time{} + gpuTmr.Start() + nItr := 1 + + for range nItr { + ce, _ := sy.BeginComputePass() + pl.Dispatch1D(ce, n, threads) + ce.End() + dvl.GPUToRead(sy.CommandEncoder) + sy.EndComputePass() + + cpyTmr.Start() + dvl.ReadSync() + cpyTmr.Stop() + gpu.ReadToBytes(dvl, sd) + } - dvl.ReadSync() - gpu.ReadToBytes(dvl, sd) + gpuTmr.Stop() - for i := 0; i < n; i++ { + mx := min(n, 10) + for i := 0; i < mx; i++ { tc := sd[i].A + sd[i].B td := tc * tc dc := sd[i].C - tc @@ -78,6 +105,7 @@ func main() { fmt.Printf("%d\t A: %g\t B: %g\t C: %g\t trg: %g\t D: %g \t trg: %g \t difC: %g \t difD: %g\n", i, sd[i].A, sd[i].B, sd[i].C, tc, sd[i].D, td, dc, dd) } fmt.Printf("\n") + fmt.Println("total:", gpuTmr.Total, "copy:", cpyTmr.Total) sy.Release() gp.Release() diff --git a/gpu/examples/compute/squares.wgsl b/gpu/examples/compute/squares.wgsl index aa390b0494..69ba23ee96 100644 --- a/gpu/examples/compute/squares.wgsl +++ b/gpu/examples/compute/squares.wgsl @@ -16,11 +16,19 @@ fn compute(d: ptr) { } @compute -@workgroup_size(64) -fn main(@builtin(global_invocation_id) idx: vec3) { - // compute(&In[idx.x]); - var d = In[idx.x]; +@workgroup_size(64,1,1) +fn main(@builtin(workgroup_id) wgid: vec3, @builtin(num_workgroups) nwg: vec3, @builtin(local_invocation_index) loci: u32) { + // note: wgid.x is the inner loop, then y, then z + let idx = loci + (wgid.x + wgid.y * nwg.x + wgid.z * nwg.x * nwg.y) * 64; + // note: array access is clamped so it doesn't exceed bounds, but best to check here + // and skip anything beyond the max size of buffer. + var d = In[idx]; compute(&d); - In[idx.x] = d; + In[idx] = d; + // the following is for testing indexing: uncomment to see. + // In[idx].A = f32(loci); + // In[idx].B = f32(wgid.x); + // In[idx].C = f32(wgid.y); + // In[idx].D = f32(idx); } diff --git a/gpu/gosl/README.md b/gpu/gosl/README.md deleted file mode 100644 index 754697f848..0000000000 --- a/gpu/gosl/README.md +++ /dev/null @@ -1,126 +0,0 @@ -# gosl - -`gosl` implements _Go as a shader language_ for GPU compute shaders (using [WebGPU](https://www.w3.org/TR/webgpu/)), **enabling standard Go code to run on the GPU**. - -The relevant subsets of Go code are specifically marked using `//gosl:` comment directives, and this code must only use basic expressions and concrete types that will compile correctly in a shader (see [Restrictions](#restrictions) below). Method functions and pass-by-reference pointer arguments to `struct` types are supported and incur no additional compute cost due to inlining (see notes below for more detail). 
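The examples/compute code above calls `Dispatch1D`; as a sketch of how it relates to the manual `Dispatch` call (the `dispatch` helper below is illustrative, and the hard-coded workgroup size of 64 must match the shader's `@workgroup_size(64,1,1)`):

```Go
package compute

import (
	"cogentcore.org/core/gpu"
	"github.com/cogentcore/webgpu/wgpu"
)

// dispatch launches n items with a workgroup size of 64. Dispatch1D is
// the shorthand that performs the same NumWorkgroups1D computation internally.
func dispatch(pl *gpu.ComputePipeline, ce *wgpu.ComputePassEncoder, n int) error {
	nx, ny := gpu.NumWorkgroups1D(n, 64)
	return pl.Dispatch(ce, nx, ny, 1) // equivalent to pl.Dispatch1D(ce, n, 64)
}
```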
- -A large and complex biologically-based neural network simulation framework called [axon](https://github.com/emer/axon) has been implemented using `gosl`, allowing 1000's of lines of equations and data structures to run through standard Go on the CPU, and accelerated significantly on the GPU. This allows efficient debugging and unit testing of the code in Go, whereas debugging on the GPU is notoriously difficult. - -`gosl` converts Go code to WGSL which can then be loaded directly into a WebGPU compute shader. - -See [examples/basic](examples/basic) and [rand](examples/rand) for examples, using the [gpu](../../gpu) GPU compute shader system. It is also possible in principle to use gosl to generate shader files for any other WebGPU application, but this has not been tested. - -You must also install `goimports` which is used on the extracted subset of Go code, to get the imports right: -```bash -$ go install golang.org/x/tools/cmd/goimports@latest -``` - -To install the `gosl` command, do: -```bash -$ go install cogentcore.org/core/vgpu/gosl@latest -``` - -In your Go code, use these comment directives: -``` -//gosl:start - -< Go code to be translated > - -//gosl:end -``` - -to bracket code to be processed. The resulting converted code is copied into a `shaders` subdirectory created under the current directory where the `gosl` command is run, using the filenames specified in the comment directives. Each such filename should correspond to a complete shader program (i.e., a "kernel"), or a file that can be included into other shader programs. Code is appended to the target file names in the order of the source .go files on the command line, so multiple .go files can be combined into one resulting WGSL file. - -WGSL specific code, e.g., for the `main` compute function or to specify `#include` files, can be included either by specifying files with a `.wgsl` extension as arguments to the `gosl` command, or by using a `//gosl:wgsl` comment directive as follows: -``` -//gosl:wgsl - -// - -//gosl:end -``` -where the WGSL shader code is commented out in the .go file -- it will be copied into the target filename and uncommented. The WGSL code can be surrounded by `/*` `*/` comment blocks (each on a separate line) for multi-line code (though using a separate `.wgsl` file is generally preferable in this case). - -For `.wgsl` files, their filename is used to determine the `shaders` destination file name, and they are automatically appended to the end of the corresponding `.wgsl` file generated from the `Go` files -- this is where the `main` function and associated global variables should be specified. - -**IMPORTANT:** all `.go` and `.wgsl` files are removed from the `shaders` directory prior to processing to ensure everything there is current -- always specify a different source location for any custom `.wgsl` files that are included. - -# Usage - -``` -gosl [flags] [path ...] 
-``` - -The flags are: -``` - -debug - enable debugging messages while running - -exclude string - comma-separated list of names of functions to exclude from exporting to HLSL (default "Update,Defaults") - -keep - keep temporary converted versions of the source files, for debugging - -out string - output directory for shader code, relative to where gosl is invoked -- must not be an empty string (default "shaders") -``` - -`gosl` path args can include filenames, directory names, or Go package paths (e.g., `cogentcore.org/core/math32/fastexp.go` loads just that file from the given package) -- files without any `//gosl:` comment directives will be skipped up front before any expensive processing, so it is not a problem to specify entire directories where only some files are relevant. Also, you can specify a particular file from a directory, then the entire directory, to ensure that a particular file from that directory appears first -- otherwise alphabetical order is used. `gosl` ensures that only one copy of each file is included. - -Any `struct` types encountered will be checked for 16-byte alignment of sub-types and overall sizes as an even multiple of 16 bytes (4 `float32` or `int32` values), which is the alignment used in WGSL and glsl shader languages, and the underlying GPU hardware presumably. Look for error messages on the output from the gosl run. This ensures that direct byte-wise copies of data between CPU and GPU will be successful. The fact that `gosl` operates directly on the original CPU-side Go code uniquely enables it to perform these alignment checks, which are otherwise a major source of difficult-to-diagnose bugs. - -# Restrictions - -In general shader code should be simple mathematical expressions and data types, with minimal control logic via `if`, `for` statements, and only using the subset of Go that is consistent with C. Here are specific restrictions: - -* Can only use `float32`, `[u]int32` for basic types (`int` is converted to `int32` automatically), and `struct` types composed of these same types -- no other Go types (i.e., `map`, slices, `string`, etc) are compatible. There are strict alignment restrictions on 16 byte (e.g., 4 `float32`'s) intervals that are enforced via the `alignsl` sub-package. - -* WGSL does _not_ support 64 bit float or int. - -* Use `slbool.Bool` instead of `bool` -- it defines a Go-friendly interface based on a `int32` basic type. - -* Alignment and padding of `struct` fields is key -- this is automatically checked by `gosl`. - -* WGSL does not support enum types, but standard go `const` declarations will be converted. Use an `int32` or `uint32` data type. It will automatically deal with the simple incrementing `iota` values, but not more complex cases. Also, for bitflags, define explicitly, not using `bitflags` package, and use `0x01`, `0x02`, `0x04` etc instead of `1<<2` -- in theory the latter should be ok but in practice it complains. - -* Cannot use multiple return values, or multiple assignment of variables in a single `=` expression. - -* *Can* use multiple variable names with the same type (e.g., `min, max float32`) -- this will be properly converted to the more redundant C form with the type repeated. - -* `switch` `case` statements are _purely_ self-contained -- no `fallthrough` allowed! does support multiple items per `case` however. - -* TODO: WGSL does not do multi-pass compiling, so all dependent types must be specified *before* being used in other ones, and this also precludes referencing the *current* type within itself. 
todo: can you just use a forward declaration? - -* WGSL does specify that new variables are initialized to 0, like Go, but also somehow discourages that use-case. It is safer to initialize directly: -```Go - val := float32(0) // guaranteed 0 value - var val float32 // ok but generally avoid -``` - -## Other language features - -* [tour-of-wgsl](https://google.github.io/tour-of-wgsl/types/pointers/passing_pointers/) is a good reference to explain things more directly than the spec. - -* `ptr` provides a pointer arg -* `private` scope = within the shader code "module", i.e., one thread. -* `function` = within the function, not outside it. -* `workgroup` = shared across workgroup -- coudl be powerful (but slow!) -- need to learn more. - -## Random numbers: slrand - -See [slrand](https://github.com/emer/gosl/v2/tree/main/slrand) for a shader-optimized random number generation package, which is supported by `gosl` -- it will convert `slrand` calls into appropriate WGSL named function calls. `gosl` will also copy the `slrand.wgsl` file, which contains the full source code for the RNG, into the destination `shaders` directory, so it can be included with a simple local path: - -```Go -//gosl:wgsl mycode -// #include "slrand.wgsl" -//gosl:end mycode -``` - -# Performance - -With sufficiently large N, and ignoring the data copying setup time, around ~80x speedup is typical on a Macbook Pro with M1 processor. The `rand` example produces a 175x speedup! - -# Implementation / Design Notes - -# Links - -Key docs for WGSL as compute shaders: - diff --git a/gpu/gosl/alignsl/README.md b/gpu/gosl/alignsl/README.md deleted file mode 100644 index 24e1e2d48c..0000000000 --- a/gpu/gosl/alignsl/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# AlignSL - -alignsl performs 16-byte alignment and total size modulus checking of struct types to ensure WGSL (and GSL) compatibility. - -Checks that `struct` sizes are an even multiple of 16 bytes (e.g., 4 float32's), fields are 32 or 64 bit types: [U]Int32, Float32, [U]Int64, Float64, and that fields that are other struct types are aligned at even 16 byte multiples. - -It is called with a [golang.org/x/tools/go/packages](https://pkg.go.dev/golang.org/x/tools/go/packages) `Package` that provides the `types.Sizes` and `Types.Scope()` to get the types. - -The `CheckPackage` method checks all types in a `Package`, and returns an error if there are any violations -- this error string contains a full user-friendly warning message that can be printed. - - - diff --git a/gpu/gosl/alignsl/alignsl.go b/gpu/gosl/alignsl/alignsl.go deleted file mode 100644 index 21cf353bdb..0000000000 --- a/gpu/gosl/alignsl/alignsl.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright (c) 2022, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -package alignsl performs 16-byte alignment checking of struct fields -and total size modulus checking of struct types to ensure WGSL -(and GSL) compatibility. - -Checks that struct sizes are an even multiple of 16 bytes -(4 float32's), fields are 32 bit types: [U]Int32, Float32, -and that fields that are other struct types are aligned -at even 16 byte multiples. 
-*/ -package alignsl - -import ( - "errors" - "fmt" - "go/types" - "strings" - - "golang.org/x/tools/go/packages" -) - -// Context for given package run -type Context struct { - Sizes types.Sizes // from package - Structs map[*types.Struct]string // structs that have been processed already -- value is name - Stack map[*types.Struct]string // structs to process in a second pass -- structs encountered during processing of other structs - Errs []string // accumulating list of error strings -- empty if all good -} - -func NewContext(sz types.Sizes) *Context { - cx := &Context{Sizes: sz} - cx.Structs = make(map[*types.Struct]string) - cx.Stack = make(map[*types.Struct]string) - return cx -} - -func (cx *Context) IsNewStruct(st *types.Struct) bool { - if _, has := cx.Structs[st]; has { - return false - } - cx.Structs[st] = st.String() - return true -} - -func (cx *Context) AddError(ers string, hasErr bool, stName string) bool { - if !hasErr { - cx.Errs = append(cx.Errs, stName) - } - cx.Errs = append(cx.Errs, ers) - return true -} - -func TypeName(tp types.Type) string { - switch x := tp.(type) { - case *types.Named: - return x.Obj().Name() - } - return tp.String() -} - -// CheckStruct is the primary checker -- returns hasErr = true if there -// are any mis-aligned fields or total size of struct is not an -// even multiple of 16 bytes -- adds details to Errs -func CheckStruct(cx *Context, st *types.Struct, stName string) bool { - if !cx.IsNewStruct(st) { - return false - } - var flds []*types.Var - nf := st.NumFields() - if nf == 0 { - return false - } - hasErr := false - for i := 0; i < nf; i++ { - fl := st.Field(i) - flds = append(flds, fl) - ft := fl.Type() - ut := ft.Underlying() - if bt, isBasic := ut.(*types.Basic); isBasic { - kind := bt.Kind() - if !(kind == types.Uint32 || kind == types.Int32 || kind == types.Float32 || kind == types.Uint64) { - hasErr = cx.AddError(fmt.Sprintf(" %s: basic type != [U]Int32 or Float32: %s", fl.Name(), bt.String()), hasErr, stName) - } - } else { - if sst, is := ut.(*types.Struct); is { - cx.Stack[sst] = TypeName(ft) - } else { - hasErr = cx.AddError(fmt.Sprintf(" %s: unsupported type: %s", fl.Name(), ft.String()), hasErr, stName) - } - } - } - offs := cx.Sizes.Offsetsof(flds) - last := cx.Sizes.Sizeof(flds[nf-1].Type()) - totsz := int(offs[nf-1] + last) - mod := totsz % 16 - if mod != 0 { - needs := 4 - (mod / 4) - hasErr = cx.AddError(fmt.Sprintf(" total size: %d not even multiple of 16 -- needs %d extra 32bit padding fields", totsz, needs), hasErr, stName) - } - - // check that struct starts at mod 16 byte offset - for i, fl := range flds { - ft := fl.Type() - ut := ft.Underlying() - if _, is := ut.(*types.Struct); is { - off := offs[i] - if off%16 != 0 { - - hasErr = cx.AddError(fmt.Sprintf(" %s: struct type: %s is not at mod-16 byte offset: %d", fl.Name(), TypeName(ft), off), hasErr, stName) - } - } - } - - return hasErr -} - -// CheckPackage is main entry point for checking a package -// returns error string if any errors found. -func CheckPackage(pkg *packages.Package) error { - cx := NewContext(pkg.TypesSizes) - sc := pkg.Types.Scope() - hasErr := CheckScope(cx, sc, 0) - er := CheckStack(cx) - if hasErr || er { - str := ` -WARNING: in struct type alignment checking: - Checks that struct sizes are an even multiple of 16 bytes (4 float32's), - and fields are 32 bit types: [U]Int32, Float32 or other struct, - and that fields that are other struct types are aligned at even 16 byte multiples. 
- List of errors found follow below, by struct type name: -` + strings.Join(cx.Errs, "\n") - return errors.New(str) - } - return nil -} - -func CheckStack(cx *Context) bool { - hasErr := false - for { - if len(cx.Stack) == 0 { - break - } - st := cx.Stack - cx.Stack = make(map[*types.Struct]string) // new stack - for st, nm := range st { - er := CheckStruct(cx, st, nm) - if er { - hasErr = true - } - } - } - return hasErr -} - -func CheckScope(cx *Context, sc *types.Scope, level int) bool { - nms := sc.Names() - ntyp := 0 - hasErr := false - for _, nm := range nms { - ob := sc.Lookup(nm) - tp := ob.Type() - if tp == nil { - continue - } - if nt, is := tp.(*types.Named); is { - ut := nt.Underlying() - if ut == nil { - continue - } - if st, is := ut.(*types.Struct); is { - er := CheckStruct(cx, st, nt.Obj().Name()) - if er { - hasErr = true - } - ntyp++ - } - } - } - if ntyp == 0 { - for i := 0; i < sc.NumChildren(); i++ { - cs := sc.Child(i) - er := CheckScope(cx, cs, level+1) - if er { - hasErr = true - } - } - } - return hasErr -} diff --git a/gpu/gosl/doc.go b/gpu/gosl/doc.go deleted file mode 100644 index b10cfe9835..0000000000 --- a/gpu/gosl/doc.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2022, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// copied from go src/cmd/gofmt/doc.go: - -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -gosl translates Go source code into WGSL compatible shader code. -use //gosl:start and //gosl:end to -bracket code that should be copied into shaders/.wgsl -Use //gosl:main instead of start for shader code that is -commented out in the .go file, which will be copied into the filename -and uncommented. - -pass filenames or directory names for files to process. - -Usage: - - gosl [flags] [path ...] - -The flags are: - - -debug - enable debugging messages while running - -exclude string - comma-separated list of names of functions to exclude from exporting to HLSL (default "Update,Defaults") - -keep - keep temporary converted versions of the source files, for debugging - -out string - output directory for shader code, relative to where gosl is invoked -- must not be an empty string (default "shaders") -*/ -package main diff --git a/gpu/gosl/examples/basic/README.md b/gpu/gosl/examples/basic/README.md deleted file mode 100644 index 057700da3f..0000000000 --- a/gpu/gosl/examples/basic/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# basic - -This example just does some basic calculations on data structures and reports the time difference between the CPU and GPU. Getting about a 20x speedup on a Macbook Pro M3 Max. - -# Building - -There is a `//go:generate` comment directive in `main.go` that calls `gosl` on the relevant files, so you can do `go generate` followed by `go build` to run it. There is also a `Makefile` with the same `gosl` command, so `make` can be used instead of go generate. - -The generated files go into the `shaders/` subdirectory. - - diff --git a/gpu/gosl/examples/basic/compute.go b/gpu/gosl/examples/basic/compute.go deleted file mode 100644 index 27b43a7c27..0000000000 --- a/gpu/gosl/examples/basic/compute.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2022, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import "cogentcore.org/core/math32" - -//gosl:wgsl basic -// #include "fastexp.wgsl" -//gosl:end basic - -//gosl:start basic - -// DataStruct has the test data -type DataStruct struct { - - // raw value - Raw float32 - - // integrated value - Integ float32 - - // exp of integ - Exp float32 - - // must pad to multiple of 4 floats for arrays - pad float32 -} - -// ParamStruct has the test params -type ParamStruct struct { - - // rate constant in msec - Tau float32 - - // 1/Tau - Dt float32 - - pad float32 - pad1 float32 -} - -// IntegFromRaw computes integrated value from current raw value -func (ps *ParamStruct) IntegFromRaw(ds *DataStruct) { - ds.Integ += ps.Dt * (ds.Raw - ds.Integ) - ds.Exp = math32.FastExp(-ds.Integ) -} - -//gosl:end basic - -// note: only core compute code needs to be in shader -- all init is done CPU-side - -func (ps *ParamStruct) Defaults() { - ps.Tau = 5 - ps.Update() -} - -func (ps *ParamStruct) Update() { - ps.Dt = 1.0 / ps.Tau -} - -//gosl:wgsl basic -/* -@group(0) @binding(0) -var Params: array; - -@group(0) @binding(1) -var Data: array; - -@compute -@workgroup_size(64) -fn main(@builtin(global_invocation_id) idx: vec3) { - var pars = Params[0]; - var data = Data[idx.x]; - ParamStruct_IntegFromRaw(&pars, &data); - Data[idx.x] = data; -} -*/ -//gosl:end basic diff --git a/gpu/gosl/examples/basic/main.go b/gpu/gosl/examples/basic/main.go deleted file mode 100644 index 87a6be2469..0000000000 --- a/gpu/gosl/examples/basic/main.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright (c) 2022, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This example just does some basic calculations on data structures and -// reports the time difference between the CPU and GPU. -package main - -import ( - "embed" - "fmt" - "math/rand" - "runtime" - "unsafe" - - "cogentcore.org/core/base/timer" - "cogentcore.org/core/gpu" -) - -//go:generate ../../gosl cogentcore.org/core/math32/fastexp.go compute.go - -//go:embed shaders/basic.wgsl shaders/fastexp.wgsl -var shaders embed.FS - -func init() { - // must lock main thread for gpu! 
- runtime.LockOSThread() -} - -func main() { - gpu.Debug = true - gp := gpu.NewComputeGPU() - fmt.Printf("Running on GPU: %s\n", gp.DeviceName) - - // gp.PropertiesString(true) // print - - sy := gpu.NewComputeSystem(gp, "compute") - pl := gpu.NewComputePipelineShaderFS(shaders, "shaders/basic.wgsl", sy) - vars := sy.Vars() - sgp := vars.AddGroup(gpu.Storage) - - n := 2000000 // note: not necc to spec up-front, but easier if so - threads := 64 - - pv := sgp.AddStruct("Params", int(unsafe.Sizeof(ParamStruct{})), 1, gpu.ComputeShader) - dv := sgp.AddStruct("Data", int(unsafe.Sizeof(DataStruct{})), n, gpu.ComputeShader) - - sgp.SetNValues(1) - sy.Config() - - pvl := pv.Values.Values[0] - dvl := dv.Values.Values[0] - - pars := make([]ParamStruct, 1) - pars[0].Defaults() - - cd := make([]DataStruct, n) - for i := range cd { - cd[i].Raw = rand.Float32() - } - - sd := make([]DataStruct, n) - for i := range sd { - sd[i].Raw = cd[i].Raw - } - - cpuTmr := timer.Time{} - cpuTmr.Start() - for i := range cd { - pars[0].IntegFromRaw(&cd[i]) - } - cpuTmr.Stop() - - gpuFullTmr := timer.Time{} - gpuFullTmr.Start() - - gpu.SetValueFrom(pvl, pars) - gpu.SetValueFrom(dvl, sd) - - sgp.CreateReadBuffers() - - gpuTmr := timer.Time{} - gpuTmr.Start() - - ce, _ := sy.BeginComputePass() - pl.Dispatch1D(ce, n, threads) - ce.End() - dvl.GPUToRead(sy.CommandEncoder) - sy.EndComputePass(ce) - - gpuTmr.Stop() - - dvl.ReadSync() - gpu.ReadToBytes(dvl, sd) - - gpuFullTmr.Stop() - - mx := min(n, 5) - for i := 0; i < mx; i++ { - d := cd[i].Exp - sd[i].Exp - fmt.Printf("%d\t Raw: %g\t Integ: %g\t Exp: %6.4g\tTrg: %6.4g\tDiff: %g\n", i, sd[i].Raw, sd[i].Integ, sd[i].Exp, cd[i].Exp, d) - } - fmt.Printf("\n") - - cpu := cpuTmr.Total - gpu := gpuTmr.Total - gpuFull := gpuFullTmr.Total - fmt.Printf("N: %d\t CPU: %v\t GPU: %v\t Full: %v\t CPU/GPU: %6.4g\n", n, cpu, gpu, gpuFull, float64(cpu)/float64(gpu)) - - sy.Release() - gp.Release() -} diff --git a/gpu/gosl/examples/basic/shaders/basic.wgsl b/gpu/gosl/examples/basic/shaders/basic.wgsl deleted file mode 100644 index 929012d598..0000000000 --- a/gpu/gosl/examples/basic/shaders/basic.wgsl +++ /dev/null @@ -1,52 +0,0 @@ - -#include "fastexp.wgsl" - -// DataStruct has the test data -struct DataStruct { - - // raw value - Raw: f32, - - // integrated value - Integ: f32, - - // exp of integ - Exp: f32, - - // must pad to multiple of 4 floats for arrays - pad: f32, -} - -// ParamStruct has the test params -struct ParamStruct { - - // rate constant in msec - Tau: f32, - - // 1/Tau - Dt: f32, - - pad: f32, - pad1: f32, -} - -// IntegFromRaw computes integrated value from current raw value -fn ParamStruct_IntegFromRaw(ps: ptr, ds: ptr) { - (*ds).Integ += (*ps).Dt * ((*ds).Raw - (*ds).Integ); - (*ds).Exp = FastExp(-(*ds).Integ); -} - -@group(0) @binding(0) -var Params: array; - -@group(0) @binding(1) -var Data: array; - -@compute -@workgroup_size(64) -fn main(@builtin(global_invocation_id) idx: vec3) { - var pars = Params[0]; - var data = Data[idx.x]; - ParamStruct_IntegFromRaw(&pars, &data); - Data[idx.x] = data; -} diff --git a/gpu/gosl/examples/basic/shaders/fastexp.wgsl b/gpu/gosl/examples/basic/shaders/fastexp.wgsl deleted file mode 100644 index c5f278921b..0000000000 --- a/gpu/gosl/examples/basic/shaders/fastexp.wgsl +++ /dev/null @@ -1,14 +0,0 @@ - -// FastExp is a quartic spline approximation to the Exp function, by N.N. Schraudolph -// It does not have any of the sanity checking of a standard method -- returns -// nonsense when arg is out of range. Runs in 2.23ns vs. 
6.3ns for 64bit which is faster -// than exp actually. -fn FastExp(x: f32) -> f32 { - if (x <= -88.02969) { // this doesn't add anything and -exp is main use-case anyway - return f32(0.0); - } - var i = i32(12102203*x) + i32(127)*(i32(1)<<23); - var m = i >> 7 & 0xFFFF; // copy mantissa - i += (((((((((((3537 * m) >> 16) + 13668) * m) >> 18) + 15817) * m) >> 14) - 80470) * m) >> 11); - return bitcast(u32(i)); -} diff --git a/gpu/gosl/examples/rand/README.md b/gpu/gosl/examples/rand/README.md deleted file mode 100644 index 7847975e0d..0000000000 --- a/gpu/gosl/examples/rand/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# rand - -This example tests the `slrand` random number generation functions. The `go test` in `slrand` itself tests the Go version of the code against known values, and this example tests the GPU WGSL versions against the Go versions. - -The output shows the first 5 sets of random numbers, with the CPU on the first line and the GPU on the second line. If there are any `*` on any of the lines, then there is a difference between the two, and an error message will be reported at the bottom. - -The total time to generate 1 million random numbers is shown at the end, comparing time on the CPU vs. GPU. On a Mac Book Pro M3 Max laptop, the CPU took roughly _95_ times longer to generate the random numbers than the GPU (and with 10 million, _140 times_). - -# Building - -There is a `//go:generate` comment directive in `main.go` that calls _the local build_ of `gosl` on the relevant files, so you can do `go generate` followed by `go build` to run it. You must do `go build` in main `gosl` dir before this will work. - -The generated files go into the `shaders/` subdirectory. - -Ignore the type alignment checking errors about Uint2 and Vector2 not being an even multiple of 16 bytes -- we have put in the necessary padding. 
- - -# Results - -``` -Running on GPU: Apple M3 Max -Group: 0 Group0 - Role: Storage - Var: 0: Counter Struct (size: 8) Values: 1 - Var: 1: Data Struct[0xF4240] (size: 64) Values: 1 - -Index Dif(Ex,Tol) CPU then GPU -0 U: ff1dae59 6cd10df2 F: 0.86274576 0.37199184 F11: 0.018057441 0.24895307 G: 0.7749991 0.05054265 - U: ff1dae59 6cd10df2 F: 0.86274576 0.37199184 F11: 0.018057441 0.24895307 G: 0.7749991 0.050542645 -1 U: 936f52f3 5daa6164 F: 0.87315667 0.9577325 F11: 0.7201687 0.93292534 G: 0.82841516 2.2875004 - U: 936f52f3 5daa6164 F: 0.87315667 0.9577325 F11: 0.7201687 0.93292534 G: 0.82841516 2.2875004 -2 U: 1a9351a6 5109a5a6 F: 0.7390654 0.13888454 F11: 0.87044024 0.66876566 G: -1.4226444 0.024453443 - U: 1a9351a6 5109a5a6 F: 0.7390654 0.13888454 F11: 0.87044024 0.66876566 G: -1.4226445 0.024453443 -3 U: 19b1b6d2 310630c9 F: 0.8936864 0.29176176 F11: 0.5634876 0.43976986 G: 0.07859113 0.07448565 - U: 19b1b6d2 310630c9 F: 0.8936864 0.29176176 F11: 0.5634876 0.43976986 G: 0.07859113 0.07448566 -4 U: 41556b7f eeb8e52c F: 0.46174365 0.28119206 F11: 0.00079108716 0.87918425 G: 0.2611614 -1.3341242 - U: 41556b7f eeb8e52c F: 0.46174365 0.28119206 F11: 0.00079108716 0.87918425 G: 0.26116177 -1.3341241 -5 U: c034b0a6 7188ed5e F: 0.9694913 0.6442756 F11: 0.071927555 0.5161969 G: 0.6914865 0.32279235 - U: c034b0a6 7188ed5e F: 0.9694913 0.6442756 F11: 0.071927555 0.5161969 G: 0.6914865 0.32279235 - -N: 1000000 CPU: 104.698542ms GPU: 1.095417ms Full: 27.570417ms CPU/GPU: 95.58 -``` - diff --git a/gpu/gosl/examples/rand/main.go b/gpu/gosl/examples/rand/main.go deleted file mode 100644 index f053e96766..0000000000 --- a/gpu/gosl/examples/rand/main.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "embed" - "fmt" - "runtime" - "unsafe" - - "log/slog" - - "cogentcore.org/core/base/timer" - "cogentcore.org/core/gpu" -) - -// note: standard one to use is plain "gosl" which should be go install'd - -//go:generate ../../gosl rand.go rand.wgsl - -//go:embed shaders/*.wgsl -var shaders embed.FS - -func init() { - // must lock main thread for gpu! 
- runtime.LockOSThread() -} - -func main() { - gpu.Debug = true - gp := gpu.NewComputeGPU() - fmt.Printf("Running on GPU: %s\n", gp.DeviceName) - - // n := 10 - n := 4_000_000 // 5_000_000 is too much -- 256_000_000 -- up against buf size limit - threads := 64 - - dataC := make([]Rnds, n) - dataG := make([]Rnds, n) - - cpuTmr := timer.Time{} - cpuTmr.Start() - - seed := uint64(0) - for i := range dataC { - d := &dataC[i] - d.RndGen(seed, uint32(i)) - } - cpuTmr.Stop() - - sy := gpu.NewComputeSystem(gp, "slrand") - pl := gpu.NewComputePipelineShaderFS(shaders, "shaders/rand.wgsl", sy) - vars := sy.Vars() - sgp := vars.AddGroup(gpu.Storage) - - ctrv := sgp.AddStruct("Counter", int(unsafe.Sizeof(seed)), 1, gpu.ComputeShader) - datav := sgp.AddStruct("Data", int(unsafe.Sizeof(Rnds{})), n, gpu.ComputeShader) - - sgp.SetNValues(1) - sy.Config() - - cvl := ctrv.Values.Values[0] - dvl := datav.Values.Values[0] - - gpuFullTmr := timer.Time{} - gpuFullTmr.Start() - - gpu.SetValueFrom(cvl, []uint64{seed}) - gpu.SetValueFrom(dvl, dataG) - - sgp.CreateReadBuffers() - - gpuTmr := timer.Time{} - gpuTmr.Start() - - ce, _ := sy.BeginComputePass() - pl.Dispatch1D(ce, n, threads) - ce.End() - dvl.GPUToRead(sy.CommandEncoder) - sy.EndComputePass(ce) - - gpuTmr.Stop() - - dvl.ReadSync() - gpu.ReadToBytes(dvl, dataG) - - gpuFullTmr.Stop() - - anyDiffEx := false - anyDiffTol := false - mx := min(n, 5) - fmt.Printf("Index\tDif(Ex,Tol)\t CPU \t then GPU\n") - for i := 0; i < n; i++ { - dc := &dataC[i] - dg := &dataG[i] - smEx, smTol := dc.IsSame(dg) - if !smEx { - anyDiffEx = true - } - if !smTol { - anyDiffTol = true - } - if i > mx { - continue - } - exS := " " - if !smEx { - exS = "*" - } - tolS := " " - if !smTol { - tolS = "*" - } - fmt.Printf("%d\t%s %s\t%s\n\t\t%s\n", i, exS, tolS, dc.String(), dg.String()) - } - fmt.Printf("\n") - - if anyDiffEx { - slog.Error("Differences between CPU and GPU detected at Exact level (excludes Gauss)") - } - if anyDiffTol { - slog.Error("Differences between CPU and GPU detected at Tolerance level", "tolerance", Tol) - } - - cpu := cpuTmr.Total - gpu := gpuTmr.Total - fmt.Printf("N: %d\t CPU: %v\t GPU: %v\t Full: %v\t CPU/GPU: %6.4g\n", n, cpu, gpu, gpuFullTmr.Total, float64(cpu)/float64(gpu)) - - sy.Release() - gp.Release() -} diff --git a/gpu/gosl/examples/rand/rand.go b/gpu/gosl/examples/rand/rand.go deleted file mode 100644 index 2f28690f8c..0000000000 --- a/gpu/gosl/examples/rand/rand.go +++ /dev/null @@ -1,73 +0,0 @@ -package main - -import ( - "fmt" - - "cogentcore.org/core/gpu/gosl/slrand" - "cogentcore.org/core/gpu/gosl/sltype" - "cogentcore.org/core/math32" -) - -//gosl:wgsl rand -// #include "slrand.wgsl" -//gosl:end rand - -//gosl:start rand - -type Rnds struct { - Uints sltype.Uint32Vec2 - pad, pad1 int32 - Floats sltype.Float32Vec2 - pad2, pad3 int32 - Floats11 sltype.Float32Vec2 - pad4, pad5 int32 - Gauss sltype.Float32Vec2 - pad6, pad7 int32 -} - -// RndGen calls random function calls to test generator. -// Note that the counter to the outer-most computation function -// is passed by *value*, so the same counter goes to each element -// as it is computed, but within this scope, counter is passed by -// reference (as a pointer) so subsequent calls get a new counter value. -// The counter should be incremented by the number of random calls -// outside of the overall update function. 
-func (r *Rnds) RndGen(counter uint64, idx uint32) { - r.Uints = slrand.Uint32Vec2(counter, uint32(0), idx) - r.Floats = slrand.Float32Vec2(counter, uint32(1), idx) - r.Floats11 = slrand.Float32Range11Vec2(counter, uint32(2), idx) - r.Gauss = slrand.Float32NormVec2(counter, uint32(3), idx) -} - -//gosl:end rand - -const Tol = 1.0e-4 // fails at lower tol eventually -- -6 works for many - -func FloatSame(f1, f2 float32) (exact, tol bool) { - exact = f1 == f2 - tol = math32.Abs(f1-f2) < Tol - return -} - -func Float32Vec2Same(f1, f2 sltype.Float32Vec2) (exact, tol bool) { - e1, t1 := FloatSame(f1.X, f2.X) - e2, t2 := FloatSame(f1.Y, f2.Y) - exact = e1 && e2 - tol = t1 && t2 - return -} - -// IsSame compares values at two levels: exact and with Tol -func (r *Rnds) IsSame(o *Rnds) (exact, tol bool) { - e1 := r.Uints == o.Uints - e2, t2 := Float32Vec2Same(r.Floats, o.Floats) - e3, t3 := Float32Vec2Same(r.Floats11, o.Floats11) - _, t4 := Float32Vec2Same(r.Gauss, o.Gauss) - exact = e1 && e2 && e3 // skip e4 -- know it isn't - tol = t2 && t3 && t4 - return -} - -func (r *Rnds) String() string { - return fmt.Sprintf("U: %x\t%x\tF: %g\t%g\tF11: %g\t%g\tG: %g\t%g", r.Uints.X, r.Uints.Y, r.Floats.X, r.Floats.Y, r.Floats11.X, r.Floats11.Y, r.Gauss.X, r.Gauss.Y) -} diff --git a/gpu/gosl/examples/rand/rand.wgsl b/gpu/gosl/examples/rand/rand.wgsl deleted file mode 100644 index 780ae9ef26..0000000000 --- a/gpu/gosl/examples/rand/rand.wgsl +++ /dev/null @@ -1,16 +0,0 @@ - -@group(0) @binding(0) -var Counter: array; - -@group(0) @binding(1) -var Data: array; - -@compute -@workgroup_size(64) -fn main(@builtin(global_invocation_id) idx: vec3) { - var ctr = Counter[0]; - var data = Data[idx.x]; - Rnds_RndGen(&data, ctr, idx.x); - Data[idx.x] = data; -} - diff --git a/gpu/gosl/examples/rand/shaders/rand.wgsl b/gpu/gosl/examples/rand/shaders/rand.wgsl deleted file mode 100644 index baf5d96be3..0000000000 --- a/gpu/gosl/examples/rand/shaders/rand.wgsl +++ /dev/null @@ -1,371 +0,0 @@ - -// #include "slrand.wgsl" -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Original file is in Go package: github.com/cogentcore/core/gpu/gosl/slrand -// See README.md there for documentation. - -// These random number generation (RNG) functions are optimized for -// use on the GPU, with equivalent Go versions available in slrand.go. -// This is using the Philox2x32 counter-based RNG. - -// #include "sltype.wgsl" -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Original file is in Go package: github.com/cogentcore/core/gpu/gosl/sltype -// See README.md there for documentation. - -// This file emulates uint64 (u64) using 2 uint32 integers. -// and defines conversions between uint and float. - -// define a u64 type as an alias. -// if / when u64 actually happens, will make it easier to update. -alias su64 = vec2; - -// Uint32Mul64 multiplies two uint32 numbers into a uint64 (using vec2). 
-fn Uint32Mul64(a: u32, b: u32) -> su64 { - let LOMASK = (((u32(1))<<16)-1); - var r: su64; - r.x = a * b; /* full low multiply */ - let ahi = a >> 16; - let alo = a & LOMASK; - let bhi = b >> 16; - let blo = b & LOMASK; - - let ahbl = ahi * blo; - let albh = alo * bhi; - - let ahbl_albh = ((ahbl&LOMASK) + (albh&LOMASK)); - var hit = ahi*bhi + (ahbl>>16) + (albh>>16); - hit += ahbl_albh >> 16; /* carry from the sum of lo(ahbl) + lo(albh) ) */ - /* carry from the sum with alo*blo */ - if ((r.x >> u32(16)) < (ahbl_albh&LOMASK)) { - hit += u32(1); - } - r.y = hit; - return r; -} - -/* -// Uint32Mul64 multiplies two uint32 numbers into a uint64 (using su64). -fn Uint32Mul64(a: u32, b: u32) -> su64 { - return su64(a) * su64(b); -} -*/ - - -// Uint64Add32 adds given uint32 number to given uint64 (using vec2). -fn Uint64Add32(a: su64, b: u32) -> su64 { - if (b == 0) { - return a; - } - var s = a; - if (s.x > u32(0xffffffff) - b) { - s.y++; - s.x = (b - 1) - (u32(0xffffffff) - s.x); - } else { - s.x += b; - } - return s; -} - -// Uint64Incr returns increment of the given uint64 (using vec2). -fn Uint64Incr(a: su64) -> su64 { - var s = a; - if(s.x == 0xffffffff) { - s.y++; - s.x = u32(0); - } else { - s.x++; - } - return s; -} - -// Uint32ToFloat32 converts a uint32 integer into a float32 -// in the (0,1) interval (i.e., exclusive of 1). -// This differs from the Go standard by excluding 0, which is handy for passing -// directly to Log function, and from the reference Philox code by excluding 1 -// which is in the Go standard and most other standard RNGs. -fn Uint32ToFloat32(val: u32) -> f32 { - let factor = f32(1.0) / (f32(u32(0xffffffff)) + f32(1.0)); - let halffactor = f32(0.5) * factor; - var f = f32(val) * factor + halffactor; - if (f == 1.0) { // exclude 1 - return bitcast(0x3F7FFFFF); - } - return f; -} - -// note: there is no overloading of user-defined functions -// https://github.com/gpuweb/gpuweb/issues/876 - -// Uint32ToFloat32Vec2 converts two uint 32 bit integers -// into two corresponding 32 bit f32 values -// in the (0,1) interval (i.e., exclusive of 1). -fn Uint32ToFloat32Vec2(val: vec2) -> vec2 { - var r: vec2; - r.x = Uint32ToFloat32(val.x); - r.y = Uint32ToFloat32(val.y); - return r; -} - -// Uint32ToFloat32Range11 converts a uint32 integer into a float32 -// in the [-1..1] interval (inclusive of -1 and 1, never identically == 0). -fn Uint32ToFloat32Range11(val: u32) -> f32 { - let factor = f32(1.0) / (f32(i32(0x7fffffff)) + f32(1.0)); - let halffactor = f32(0.5) * factor; - return (f32(val) * factor + halffactor); -} - -// Uint32ToFloat32Range11Vec2 converts two uint32 integers into two float32 -// in the [-1,1] interval (inclusive of -1 and 1, never identically == 0). -fn Uint32ToFloat32Range11Vec2(val: vec2) -> vec2 { - var r: vec2; - r.x = Uint32ToFloat32Range11(val.x); - r.y = Uint32ToFloat32Range11(val.y); - return r; -} - - - - -// Philox2x32round does one round of updating of the counter. -fn Philox2x32round(counter: su64, key: u32) -> su64 { - let mul = Uint32Mul64(u32(0xD256D193), counter.x); - var ctr: su64; - ctr.x = mul.y ^ key ^ counter.y; - ctr.y = mul.x; - return ctr; -} - -// Philox2x32bumpkey does one round of updating of the key -fn Philox2x32bumpkey(key: u32) -> u32 { - return key + u32(0x9E3779B9); -} - -// Philox2x32 implements the stateless counter-based RNG algorithm -// returning a random number as two uint32 values, given a -// counter and key input that determine the result. -// The input counter is not modified. 
-fn Philox2x32(counter: su64, key: u32) -> vec2 { - // this is an unrolled loop of 10 updates based on initial counter and key, - // which produces the random deviation deterministically based on these inputs. - var ctr = Philox2x32round(counter, key); // 1 - var ky = Philox2x32bumpkey(key); - ctr = Philox2x32round(ctr, ky); // 2 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 3 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 4 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 5 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 6 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 7 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 8 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 9 - ky = Philox2x32bumpkey(ky); - - return Philox2x32round(ctr, ky); // 10 -} - -//////////////////////////////////////////////////////////// -// Methods below provide a standard interface with more -// readable names, mapping onto the Go rand methods. -// -// They assume a global shared counter, which is then -// incremented by a function index, defined for each function -// consuming random numbers that _could_ be called within a parallel -// processing loop. At the end of the loop, the global counter should -// be incremented by the total possible number of such functions. -// This results in fully resproducible results, invariant to -// specific processing order, and invariant to whether any one function -// actually calls the random number generator. - -// RandUint32Vec2 returns two uniformly distributed 32 unsigned integers, -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandUint32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - return Philox2x32(Uint64Add32(counter, funcIndex), key); -} - -// RandUint32 returns a uniformly distributed 32 unsigned integer, -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandUint32(counter: su64, funcIndex: u32, key: u32) -> u32 { - return Philox2x32(Uint64Add32(counter, funcIndex), key).x; -} - -// RandFloat32Vec2 returns two uniformly distributed float32 values in range (0,1), -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key)); -} - -// RandFloat32 returns a uniformly distributed float32 value in range (0,1), -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32(counter: su64, funcIndex: u32, key: u32) -> f32 { - return Uint32ToFloat32(RandUint32(counter, funcIndex, key)); -} - -// RandFloat32Range11Vec2 returns two uniformly distributed float32 values in range [-1,1], -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. 
-fn RandFloat32Range11Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key)); -} - -// RandFloat32Range11 returns a uniformly distributed float32 value in range [-1,1], -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32Range11(counter: su64, funcIndex: u32, key: u32) -> f32 { - return Uint32ToFloat32Range11(RandUint32(counter, funcIndex, key)); -} - -// RandBoolP returns a bool true value with probability p -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandBoolP(counter: su64, funcIndex: u32, key: u32, p: f32) -> bool { - return (RandFloat32(counter, funcIndex, key) < p); -} - -fn sincospi(x: f32) -> vec2 { - let PIf = 3.1415926535897932; - var r: vec2; - r.x = cos(PIf*x); - r.y = sin(PIf*x); - return r; -} - -// RandFloat32NormVec2 returns two random float32 numbers -// distributed according to the normal, Gaussian distribution -// with zero mean and unit variance. -// This is done very efficiently using the Box-Muller algorithm -// that consumes two random 32 bit uint values. -// Uses given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32NormVec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - let ur = RandUint32Vec2(counter, funcIndex, key); - var f = sincospi(Uint32ToFloat32Range11(ur.x)); - let r = sqrt(-2.0 * log(Uint32ToFloat32(ur.y))); // guaranteed to avoid 0. - return f * r; -} - -// RandFloat32Norm returns a random float32 number -// distributed according to the normal, Gaussian distribution -// with zero mean and unit variance. -// Uses given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32Norm(counter: su64, funcIndex: u32, key: u32) -> f32 { - return RandFloat32Vec2(counter, funcIndex, key).x; -} - -// RandUint32N returns a uint32 in the range [0,N). -// Uses given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandUint32N(counter: su64, funcIndex: u32, key: u32, n: u32) -> u32 { - let v = RandFloat32(counter, funcIndex, key); - return u32(v * f32(n)); -} - -// Counter is used for storing the random counter using aligned 16 byte -// storage, with convenience functions for typical use cases. -// It retains a copy of the last Seed value, which is applied to -// the Hi uint32 value. -struct RandCounter { - Counter: su64, - HiSeed: u32, - pad: u32, -} - -// Reset resets counter to last set Seed state. -fn RandCounter_Reset(ct: ptr) { - (*ct).Counter.x = u32(0); - (*ct).Counter.y = (*ct).HiSeed; -} - -// Seed sets the Hi uint32 value from given seed, saving it in Seed field. -// Each increment in seed generates a unique sequence of over 4 billion numbers, -// so it is reasonable to just use incremental values there, but more widely -// spaced numbers will result in longer unique sequences. -// Resets Lo to 0. 
-// This same seed will be restored during Reset -fn RandCounter_Seed(ct: ptr, seed: u32) { - (*ct).HiSeed = seed; - RandCounter_Reset(ct); -} - -// Add increments the counter by given amount. -// Call this after completing a pass of computation -// where the value passed here is the max of funcIndex+1 -// used for any possible random calls during that pass. -fn RandCounter_Add(ct: ptr, inc: u32) { - (*ct).Counter = Uint64Add32((*ct).Counter, inc); -} - - -struct Rnds { - Uints: vec2, - pad: i32, - pad1: i32, - Floats: vec2, - pad2: i32, - pad3: i32, - Floats11: vec2, - pad4: i32, - pad5: i32, - Gauss: vec2, - pad6: i32, - pad7: i32, -} - -// RndGen calls random function calls to test generator. -// Note that the counter to the outer-most computation function -// is passed by *value*, so the same counter goes to each element -// as it is computed, but within this scope, counter is passed by -// reference (as a pointer) so subsequent calls get a new counter value. -// The counter should be incremented by the number of random calls -// outside of the overall update function. -fn Rnds_RndGen(r: ptr, counter: su64, idx: u32) { - (*r).Uints = RandUint32Vec2(counter, u32(0), idx); - (*r).Floats = RandFloat32Vec2(counter, u32(1), idx); - (*r).Floats11 = RandFloat32Range11Vec2(counter, u32(2), idx); - (*r).Gauss = RandFloat32NormVec2(counter, u32(3), idx); -} - -// from file: rand.wgsl - -@group(0) @binding(0) -var Counter: array; - -@group(0) @binding(1) -var Data: array; - -@compute -@workgroup_size(64) -fn main(@builtin(global_invocation_id) idx: vec3) { - var ctr = Counter[0]; - var data = Data[idx.x]; - Rnds_RndGen(&data, ctr, idx.x); - Data[idx.x] = data; -} - diff --git a/gpu/gosl/examples/rand/shaders/slrand.wgsl b/gpu/gosl/examples/rand/shaders/slrand.wgsl deleted file mode 100644 index 820e7bdf62..0000000000 --- a/gpu/gosl/examples/rand/shaders/slrand.wgsl +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Original file is in Go package: github.com/cogentcore/core/gpu/gosl/slrand -// See README.md there for documentation. - -// These random number generation (RNG) functions are optimized for -// use on the GPU, with equivalent Go versions available in slrand.go. -// This is using the Philox2x32 counter-based RNG. - -#include "sltype.wgsl" - -// Philox2x32round does one round of updating of the counter. -fn Philox2x32round(counter: su64, key: u32) -> su64 { - let mul = Uint32Mul64(u32(0xD256D193), counter.x); - var ctr: su64; - ctr.x = mul.y ^ key ^ counter.y; - ctr.y = mul.x; - return ctr; -} - -// Philox2x32bumpkey does one round of updating of the key -fn Philox2x32bumpkey(key: u32) -> u32 { - return key + u32(0x9E3779B9); -} - -// Philox2x32 implements the stateless counter-based RNG algorithm -// returning a random number as two uint32 values, given a -// counter and key input that determine the result. -// The input counter is not modified. -fn Philox2x32(counter: su64, key: u32) -> vec2 { - // this is an unrolled loop of 10 updates based on initial counter and key, - // which produces the random deviation deterministically based on these inputs. 
- var ctr = Philox2x32round(counter, key); // 1 - var ky = Philox2x32bumpkey(key); - ctr = Philox2x32round(ctr, ky); // 2 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 3 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 4 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 5 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 6 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 7 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 8 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 9 - ky = Philox2x32bumpkey(ky); - - return Philox2x32round(ctr, ky); // 10 -} - -//////////////////////////////////////////////////////////// -// Methods below provide a standard interface with more -// readable names, mapping onto the Go rand methods. -// -// They assume a global shared counter, which is then -// incremented by a function index, defined for each function -// consuming random numbers that _could_ be called within a parallel -// processing loop. At the end of the loop, the global counter should -// be incremented by the total possible number of such functions. -// This results in fully resproducible results, invariant to -// specific processing order, and invariant to whether any one function -// actually calls the random number generator. - -// RandUint32Vec2 returns two uniformly distributed 32 unsigned integers, -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandUint32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - return Philox2x32(Uint64Add32(counter, funcIndex), key); -} - -// RandUint32 returns a uniformly distributed 32 unsigned integer, -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandUint32(counter: su64, funcIndex: u32, key: u32) -> u32 { - return Philox2x32(Uint64Add32(counter, funcIndex), key).x; -} - -// RandFloat32Vec2 returns two uniformly distributed float32 values in range (0,1), -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key)); -} - -// RandFloat32 returns a uniformly distributed float32 value in range (0,1), -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32(counter: su64, funcIndex: u32, key: u32) -> f32 { - return Uint32ToFloat32(RandUint32(counter, funcIndex, key)); -} - -// RandFloat32Range11Vec2 returns two uniformly distributed float32 values in range [-1,1], -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. 
-fn RandFloat32Range11Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key)); -} - -// RandFloat32Range11 returns a uniformly distributed float32 value in range [-1,1], -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32Range11(counter: su64, funcIndex: u32, key: u32) -> f32 { - return Uint32ToFloat32Range11(RandUint32(counter, funcIndex, key)); -} - -// RandBoolP returns a bool true value with probability p -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandBoolP(counter: su64, funcIndex: u32, key: u32, p: f32) -> bool { - return (RandFloat32(counter, funcIndex, key) < p); -} - -fn sincospi(x: f32) -> vec2 { - let PIf = 3.1415926535897932; - var r: vec2; - r.x = cos(PIf*x); - r.y = sin(PIf*x); - return r; -} - -// RandFloat32NormVec2 returns two random float32 numbers -// distributed according to the normal, Gaussian distribution -// with zero mean and unit variance. -// This is done very efficiently using the Box-Muller algorithm -// that consumes two random 32 bit uint values. -// Uses given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32NormVec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - let ur = RandUint32Vec2(counter, funcIndex, key); - var f = sincospi(Uint32ToFloat32Range11(ur.x)); - let r = sqrt(-2.0 * log(Uint32ToFloat32(ur.y))); // guaranteed to avoid 0. - return f * r; -} - -// RandFloat32Norm returns a random float32 number -// distributed according to the normal, Gaussian distribution -// with zero mean and unit variance. -// Uses given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32Norm(counter: su64, funcIndex: u32, key: u32) -> f32 { - return RandFloat32Vec2(counter, funcIndex, key).x; -} - -// RandUint32N returns a uint32 in the range [0,N). -// Uses given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandUint32N(counter: su64, funcIndex: u32, key: u32, n: u32) -> u32 { - let v = RandFloat32(counter, funcIndex, key); - return u32(v * f32(n)); -} - -// Counter is used for storing the random counter using aligned 16 byte -// storage, with convenience functions for typical use cases. -// It retains a copy of the last Seed value, which is applied to -// the Hi uint32 value. -struct RandCounter { - Counter: su64, - HiSeed: u32, - pad: u32, -} - -// Reset resets counter to last set Seed state. -fn RandCounter_Reset(ct: ptr) { - (*ct).Counter.x = u32(0); - (*ct).Counter.y = (*ct).HiSeed; -} - -// Seed sets the Hi uint32 value from given seed, saving it in Seed field. -// Each increment in seed generates a unique sequence of over 4 billion numbers, -// so it is reasonable to just use incremental values there, but more widely -// spaced numbers will result in longer unique sequences. -// Resets Lo to 0. 
-// This same seed will be restored during Reset -fn RandCounter_Seed(ct: ptr, seed: u32) { - (*ct).HiSeed = seed; - RandCounter_Reset(ct); -} - -// Add increments the counter by given amount. -// Call this after completing a pass of computation -// where the value passed here is the max of funcIndex+1 -// used for any possible random calls during that pass. -fn RandCounter_Add(ct: ptr, inc: u32) { - (*ct).Counter = Uint64Add32((*ct).Counter, inc); -} diff --git a/gpu/gosl/examples/rand/shaders/sltype.wgsl b/gpu/gosl/examples/rand/shaders/sltype.wgsl deleted file mode 100644 index e3ffe9e8e6..0000000000 --- a/gpu/gosl/examples/rand/shaders/sltype.wgsl +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Original file is in Go package: github.com/cogentcore/core/gpu/gosl/sltype -// See README.md there for documentation. - -// This file emulates uint64 (u64) using 2 uint32 integers. -// and defines conversions between uint and float. - -// define a u64 type as an alias. -// if / when u64 actually happens, will make it easier to update. -alias su64 = vec2; - -// Uint32Mul64 multiplies two uint32 numbers into a uint64 (using vec2). -fn Uint32Mul64(a: u32, b: u32) -> su64 { - let LOMASK = (((u32(1))<<16)-1); - var r: su64; - r.x = a * b; /* full low multiply */ - let ahi = a >> 16; - let alo = a & LOMASK; - let bhi = b >> 16; - let blo = b & LOMASK; - - let ahbl = ahi * blo; - let albh = alo * bhi; - - let ahbl_albh = ((ahbl&LOMASK) + (albh&LOMASK)); - var hit = ahi*bhi + (ahbl>>16) + (albh>>16); - hit += ahbl_albh >> 16; /* carry from the sum of lo(ahbl) + lo(albh) ) */ - /* carry from the sum with alo*blo */ - if ((r.x >> u32(16)) < (ahbl_albh&LOMASK)) { - hit += u32(1); - } - r.y = hit; - return r; -} - -/* -// Uint32Mul64 multiplies two uint32 numbers into a uint64 (using su64). -fn Uint32Mul64(a: u32, b: u32) -> su64 { - return su64(a) * su64(b); -} -*/ - - -// Uint64Add32 adds given uint32 number to given uint64 (using vec2). -fn Uint64Add32(a: su64, b: u32) -> su64 { - if (b == 0) { - return a; - } - var s = a; - if (s.x > u32(0xffffffff) - b) { - s.y++; - s.x = (b - 1) - (u32(0xffffffff) - s.x); - } else { - s.x += b; - } - return s; -} - -// Uint64Incr returns increment of the given uint64 (using vec2). -fn Uint64Incr(a: su64) -> su64 { - var s = a; - if(s.x == 0xffffffff) { - s.y++; - s.x = u32(0); - } else { - s.x++; - } - return s; -} - -// Uint32ToFloat32 converts a uint32 integer into a float32 -// in the (0,1) interval (i.e., exclusive of 1). -// This differs from the Go standard by excluding 0, which is handy for passing -// directly to Log function, and from the reference Philox code by excluding 1 -// which is in the Go standard and most other standard RNGs. -fn Uint32ToFloat32(val: u32) -> f32 { - let factor = f32(1.0) / (f32(u32(0xffffffff)) + f32(1.0)); - let halffactor = f32(0.5) * factor; - var f = f32(val) * factor + halffactor; - if (f == 1.0) { // exclude 1 - return bitcast(0x3F7FFFFF); - } - return f; -} - -// note: there is no overloading of user-defined functions -// https://github.com/gpuweb/gpuweb/issues/876 - -// Uint32ToFloat32Vec2 converts two uint 32 bit integers -// into two corresponding 32 bit f32 values -// in the (0,1) interval (i.e., exclusive of 1). 
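The sltype file above emulates a 64-bit counter as two 32-bit halves. As a minimal Go sketch of the same carry logic used by Uint64Add32 (the names here are illustrative only and not part of the sltype package):

package main

import "fmt"

// uint64Add32 mirrors the WGSL Uint64Add32 above: the emulated 64-bit
// counter is split into lo and hi uint32 halves, and b is added to lo
// with a manual carry into hi when the 32-bit addition would overflow.
func uint64Add32(lo, hi, b uint32) (uint32, uint32) {
	if b == 0 {
		return lo, hi
	}
	if lo > 0xffffffff-b { // lo+b does not fit in 32 bits: carry
		return (b - 1) - (0xffffffff - lo), hi + 1
	}
	return lo + b, hi
}

func main() {
	lo, hi := uint64Add32(0xffffffff, 0, 1)
	fmt.Println(lo, hi) // prints: 0 1
}

The (b - 1) - (0xffffffff - lo) form computes the wrapped low word without ever forming the out-of-range intermediate sum, which is why the same expression is usable in 32-bit-only WGSL.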
-fn Uint32ToFloat32Vec2(val: vec2) -> vec2 { - var r: vec2; - r.x = Uint32ToFloat32(val.x); - r.y = Uint32ToFloat32(val.y); - return r; -} - -// Uint32ToFloat32Range11 converts a uint32 integer into a float32 -// in the [-1..1] interval (inclusive of -1 and 1, never identically == 0). -fn Uint32ToFloat32Range11(val: u32) -> f32 { - let factor = f32(1.0) / (f32(i32(0x7fffffff)) + f32(1.0)); - let halffactor = f32(0.5) * factor; - return (f32(val) * factor + halffactor); -} - -// Uint32ToFloat32Range11Vec2 converts two uint32 integers into two float32 -// in the [-1,1] interval (inclusive of -1 and 1, never identically == 0). -fn Uint32ToFloat32Range11Vec2(val: vec2) -> vec2 { - var r: vec2; - r.x = Uint32ToFloat32Range11(val.x); - r.y = Uint32ToFloat32Range11(val.y); - return r; -} - - diff --git a/gpu/gosl/extract.go b/gpu/gosl/extract.go deleted file mode 100644 index 5a41aa1649..0000000000 --- a/gpu/gosl/extract.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright (c) 2022, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "slices" -) - -func ReadFileLines(fn string) ([][]byte, error) { - nl := []byte("\n") - buf, err := os.ReadFile(fn) - if err != nil { - fmt.Println(err) - return nil, err - } - lines := bytes.Split(buf, nl) - return lines, nil -} - -// Extracts comment-directive tagged regions from .go files -func ExtractGoFiles(files []string) map[string][]byte { - sls := map[string][][]byte{} - key := []byte("//gosl:") - start := []byte("start") - wgsl := []byte("wgsl") - nowgsl := []byte("nowgsl") - end := []byte("end") - nl := []byte("\n") - include := []byte("#include") - - for _, fn := range files { - if !strings.HasSuffix(fn, ".go") { - continue - } - lines, err := ReadFileLines(fn) - if err != nil { - continue - } - - inReg := false - inHlsl := false - inNoHlsl := false - var outLns [][]byte - slFn := "" - for _, ln := range lines { - tln := bytes.TrimSpace(ln) - isKey := bytes.HasPrefix(tln, key) - var keyStr []byte - if isKey { - keyStr = tln[len(key):] - // fmt.Printf("key: %s\n", string(keyStr)) - } - switch { - case inReg && isKey && bytes.HasPrefix(keyStr, end): - if inHlsl || inNoHlsl { - outLns = append(outLns, ln) - } - sls[slFn] = outLns - inReg = false - inHlsl = false - inNoHlsl = false - case inReg: - for pkg := range LoadedPackageNames { // remove package prefixes - if !bytes.Contains(ln, include) { - ln = bytes.ReplaceAll(ln, []byte(pkg+"."), []byte{}) - } - } - outLns = append(outLns, ln) - case isKey && bytes.HasPrefix(keyStr, start): - inReg = true - slFn = string(keyStr[len(start)+1:]) - outLns = sls[slFn] - case isKey && bytes.HasPrefix(keyStr, nowgsl): - inReg = true - inNoHlsl = true - slFn = string(keyStr[len(nowgsl)+1:]) - outLns = sls[slFn] - outLns = append(outLns, ln) // key to include self here - case isKey && bytes.HasPrefix(keyStr, wgsl): - inReg = true - inHlsl = true - slFn = string(keyStr[len(wgsl)+1:]) - outLns = sls[slFn] - outLns = append(outLns, ln) - } - } - } - - rsls := make(map[string][]byte) - for fn, lns := range sls { - outfn := filepath.Join(*outDir, fn+".go") - olns := [][]byte{} - olns = append(olns, []byte("package main")) - olns = append(olns, []byte(`import ( - "math" - "cogentcore.org/core/gpu/gosl/slbool" - "cogentcore.org/core/gpu/gosl/slrand" - "cogentcore.org/core/gpu/gosl/sltype" -) -`)) - olns = append(olns, lns...) 
- SlBoolReplace(olns) - res := bytes.Join(olns, nl) - ioutil.WriteFile(outfn, res, 0644) - // not necessary and super slow: - // cmd := exec.Command("goimports", "-w", fn+".go") // get imports - // cmd.Dir, _ = filepath.Abs(*outDir) - // out, err := cmd.CombinedOutput() - // _ = out - // // fmt.Printf("\n################\ngoimports output for: %s\n%s\n", outfn, out) - // if err != nil { - // log.Println(err) - // } - rsls[fn] = bytes.Join(lns, nl) - } - - return rsls -} - -// ExtractWGSL extracts the WGSL code embedded within .Go files. -// Returns true if WGSL contains a void main( function. -func ExtractWGSL(buf []byte) ([]byte, bool) { - key := []byte("//gosl:") - wgsl := []byte("wgsl") - nowgsl := []byte("nowgsl") - end := []byte("end") - nl := []byte("\n") - stComment := []byte("/*") - edComment := []byte("*/") - comment := []byte("// ") - pack := []byte("package") - imp := []byte("import") - main := []byte("void main(") - lparen := []byte("(") - rparen := []byte(")") - - lines := bytes.Split(buf, nl) - - mx := min(10, len(lines)) - stln := 0 - gotImp := false - for li := 0; li < mx; li++ { - ln := lines[li] - switch { - case bytes.HasPrefix(ln, pack): - stln = li + 1 - case bytes.HasPrefix(ln, imp): - if bytes.HasSuffix(ln, lparen) { - gotImp = true - } else { - stln = li + 1 - } - case gotImp && bytes.HasPrefix(ln, rparen): - stln = li + 1 - } - } - - lines = lines[stln:] // get rid of package, import - - hasMain := false - inHlsl := false - inNoHlsl := false - noHlslStart := 0 - for li := 0; li < len(lines); li++ { - ln := lines[li] - isKey := bytes.HasPrefix(ln, key) - var keyStr []byte - if isKey { - keyStr = ln[len(key):] - // fmt.Printf("key: %s\n", string(keyStr)) - } - switch { - case inNoHlsl && isKey && bytes.HasPrefix(keyStr, end): - lines = slices.Delete(lines, noHlslStart, li+1) - li -= ((li + 1) - noHlslStart) - inNoHlsl = false - case inHlsl && isKey && bytes.HasPrefix(keyStr, end): - lines = slices.Delete(lines, li, li+1) - li-- - inHlsl = false - case inHlsl: - del := false - switch { - case bytes.HasPrefix(ln, stComment) || bytes.HasPrefix(ln, edComment): - lines = slices.Delete(lines, li, li+1) - li-- - del = true - case bytes.HasPrefix(ln, comment): - lines[li] = ln[3:] - } - if !del { - if bytes.HasPrefix(lines[li], main) { - hasMain = true - } - } - case isKey && bytes.HasPrefix(keyStr, wgsl): - inHlsl = true - lines = slices.Delete(lines, li, li+1) - li-- - case isKey && bytes.HasPrefix(keyStr, nowgsl): - inNoHlsl = true - noHlslStart = li - } - } - return bytes.Join(lines, nl), hasMain -} diff --git a/gpu/gosl/files.go b/gpu/gosl/files.go deleted file mode 100644 index c37feaf0e6..0000000000 --- a/gpu/gosl/files.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright (c) 2022, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
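For reference, the extraction functions above consume ordinary Go source tagged with //gosl: comment directives (start, wgsl, nowgsl, end), collecting each tagged region under the name given after the directive. A minimal sketch of such an input file follows; the file name, package, and function are hypothetical and serve only to illustrate the directive form matched above.

// compute.go (hypothetical input file)
package compute

//gosl:start compute

// Lerp linearly interpolates between a and b; this region is collected
// under the name "compute" and written out as compute.wgsl in the output
// directory (shaders/ by default).
func Lerp(a, b, t float32) float32 {
	return a + t*(b-a)
}

//gosl:end compute

// Code outside of a start/end (or wgsl/nowgsl) region remains Go-only.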
- -package main - -import ( - "fmt" - "io" - "io/fs" - "log" - "os" - "path/filepath" - "strings" - - "golang.org/x/tools/go/packages" -) - -// LoadedPackageNames are single prefix names of packages that were -// loaded in the list of files to process -var LoadedPackageNames = map[string]bool{} - -func IsGoFile(f fs.DirEntry) bool { - name := f.Name() - return !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") && !f.IsDir() -} - -func IsWGSLFile(f fs.DirEntry) bool { - name := f.Name() - return !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".wgsl") && !f.IsDir() -} - -func IsSPVFile(f fs.DirEntry) bool { - name := f.Name() - return !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".spv") && !f.IsDir() -} - -func AddFile(fn string, fls []string, procd map[string]bool) []string { - if _, has := procd[fn]; has { - return fls - } - fls = append(fls, fn) - procd[fn] = true - dir, _ := filepath.Split(fn) - if dir != "" { - dir = dir[:len(dir)-1] - pd, sd := filepath.Split(dir) - if pd != "" { - dir = sd - } - if !(dir == "math32") { - if _, has := LoadedPackageNames[dir]; !has { - LoadedPackageNames[dir] = true - // fmt.Printf("package: %s\n", dir) - } - } - } - return fls -} - -// FilesFromPaths processes all paths and returns a full unique list of files -// for subsequent processing. -func FilesFromPaths(paths []string) []string { - fls := make([]string, 0, len(paths)) - procd := make(map[string]bool) - for _, path := range paths { - switch info, err := os.Stat(path); { - case err != nil: - var pkgs []*packages.Package - dir, fl := filepath.Split(path) - if dir != "" && fl != "" && strings.HasSuffix(fl, ".go") { - pkgs, err = packages.Load(&packages.Config{Mode: packages.NeedName | packages.NeedFiles}, dir) - } else { - fl = "" - pkgs, err = packages.Load(&packages.Config{Mode: packages.NeedName | packages.NeedFiles}, path) - } - if err != nil { - fmt.Println(err) - continue - } - pkg := pkgs[0] - gofls := pkg.GoFiles - if len(gofls) == 0 { - fmt.Printf("WARNING: no go files found in path: %s\n", path) - } - if fl != "" { - for _, gf := range gofls { - if strings.HasSuffix(gf, fl) { - fls = AddFile(gf, fls, procd) - // fmt.Printf("added file: %s from package: %s\n", gf, path) - break - } - } - } else { - for _, gf := range gofls { - fls = AddFile(gf, fls, procd) - // fmt.Printf("added file: %s from package: %s\n", gf, path) - } - } - case !info.IsDir(): - path := path - fls = AddFile(path, fls, procd) - default: - // Directories are walked, ignoring non-Go, non-WGSL files. - err := filepath.WalkDir(path, func(path string, f fs.DirEntry, err error) error { - if err != nil || !(IsGoFile(f) || IsWGSLFile(f)) { - return err - } - _, err = f.Info() - if err != nil { - return nil - } - fls = AddFile(path, fls, procd) - return nil - }) - if err != nil { - log.Println(err) - } - } - } - return fls -} - -func CopyFile(src, dst string) error { - in, err := os.Open(src) - if err != nil { - return err - } - defer in.Close() - out, err := os.Create(dst) - if err != nil { - return err - } - defer out.Close() - _, err = io.Copy(out, in) - return err -} - -// CopyPackageFile copies given file name from given package path -// into the current output directory. 
-// e.g., "slrand.wgsl", "cogentcore.org/core/gpu/gosl/slrand" -func CopyPackageFile(fnm, packagePath string) error { - tofn := filepath.Join(*outDir, fnm) - pkgs, err := packages.Load(&packages.Config{Mode: packages.NeedName | packages.NeedFiles}, packagePath) - if err != nil { - fmt.Println(err) - return err - } - if len(pkgs) != 1 { - err = fmt.Errorf("%s package not found", packagePath) - fmt.Println(err) - return err - } - pkg := pkgs[0] - var fn string - if len(pkg.GoFiles) > 0 { - fn = pkg.GoFiles[0] - } else if len(pkg.OtherFiles) > 0 { - fn = pkg.GoFiles[0] - } else { - err = fmt.Errorf("No files found in package: %s", packagePath) - fmt.Println(err) - return err - } - dir, _ := filepath.Split(fn) - fmfn := filepath.Join(dir, fnm) - CopyFile(fmfn, tofn) - return nil -} - -// RemoveGenFiles removes .go, .wgsl, .spv files in shader generated dir -func RemoveGenFiles(dir string) { - err := filepath.WalkDir(dir, func(path string, f fs.DirEntry, err error) error { - if err != nil { - return err - } - if IsGoFile(f) || IsWGSLFile(f) || IsSPVFile(f) { - os.Remove(path) - } - return nil - }) - if err != nil { - log.Println(err) - } -} diff --git a/gpu/gosl/gosl.go b/gpu/gosl/gosl.go deleted file mode 100644 index e7638857d2..0000000000 --- a/gpu/gosl/gosl.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (c) 2022, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// copied and heavily edited from go src/cmd/gofmt/gofmt.go: - -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "flag" - "fmt" - "os" - "strings" - - "cogentcore.org/core/gpu/gosl/slprint" -) - -// flags -var ( - outDir = flag.String("out", "shaders", "output directory for shader code, relative to where gosl is invoked; must not be an empty string") - excludeFunctions = flag.String("exclude", "Update,Defaults", "comma-separated list of names of functions to exclude from exporting to WGSL") - keepTmp = flag.Bool("keep", false, "keep temporary converted versions of the source files, for debugging") - debug = flag.Bool("debug", false, "enable debugging messages while running") - excludeFunctionMap = map[string]bool{} -) - -// Keep these in sync with go/format/format.go. -const ( - tabWidth = 8 - printerMode = slprint.UseSpaces | slprint.TabIndent | printerNormalizeNumbers - - // printerNormalizeNumbers means to canonicalize number literal prefixes - // and exponents while printing. See https://golang.org/doc/go1.13#gosl. - // - // This value is defined in go/printer specifically for go/format and cmd/gosl. 
- printerNormalizeNumbers = 1 << 30 -) - -func usage() { - fmt.Fprintf(os.Stderr, "usage: gosl [flags] [path ...]\n") - flag.PrintDefaults() -} - -func main() { - flag.Usage = usage - flag.Parse() - goslMain() -} - -func GoslArgs() { - exs := *excludeFunctions - ex := strings.Split(exs, ",") - for _, fn := range ex { - excludeFunctionMap[fn] = true - } -} - -func goslMain() { - if *outDir == "" { - fmt.Println("Must have an output directory (default shaders), specified in -out arg") - os.Exit(1) - return - } - - if gomod := os.Getenv("GO111MODULE"); gomod == "off" { - fmt.Println("gosl only works in go modules mode, but GO111MODULE=off") - os.Exit(1) - return - } - - os.MkdirAll(*outDir, 0755) - RemoveGenFiles(*outDir) - - args := flag.Args() - if len(args) == 0 { - fmt.Printf("at least one file name must be passed\n") - return - } - - GoslArgs() - ProcessFiles(args) -} diff --git a/gpu/gosl/gosl_test.go b/gpu/gosl/gosl_test.go deleted file mode 100644 index 8a8ec76b5a..0000000000 --- a/gpu/gosl/gosl_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bytes" - "flag" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -var update = flag.Bool("update", false, "update .golden files") - -func runTest(t *testing.T, in, out string) { - // process flags - _, err := os.Lstat(in) - if err != nil { - t.Error(err) - return - } - - sls, err := ProcessFiles([]string{in}) - if err != nil { - t.Error(err) - return - } - - expected, err := os.ReadFile(out) - if err != nil { - t.Error(err) - return - } - - var got []byte - for _, b := range sls { - got = b - break - } - - if !bytes.Equal(got, expected) { - if *update { - if in != out { - if err := os.WriteFile(out, got, 0666); err != nil { - t.Error(err) - } - return - } - // in == out: don't accidentally destroy input - t.Errorf("WARNING: -update did not rewrite input file %s", in) - } - - assert.Equal(t, string(expected), string(got)) - if err := os.WriteFile(in+".gosl", got, 0666); err != nil { - t.Error(err) - } - } -} - -// TestRewrite processes testdata/*.input files and compares them to the -// corresponding testdata/*.golden files. The gosl flags used to process -// a file must be provided via a comment of the form -// -// //gosl flags -// -// in the processed file within the first 20 lines, if any. -func TestRewrite(t *testing.T) { - if gomod := os.Getenv("GO111MODULE"); gomod == "off" { - t.Error("gosl only works in go modules mode, but GO111MODULE=off") - return - } - - // determine input files - match, err := filepath.Glob("testdata/*.go") - if err != nil { - t.Fatal(err) - } - - if *outDir != "" { - os.MkdirAll(*outDir, 0755) - } - - for _, in := range match { - name := filepath.Base(in) - t.Run(name, func(t *testing.T) { - out := in // for files where input and output are identical - if strings.HasSuffix(in, ".go") { - out = in[:len(in)-len(".go")] + ".golden" - } - runTest(t, in, out) - }) - } -} diff --git a/gpu/gosl/process.go b/gpu/gosl/process.go deleted file mode 100644 index de818b4903..0000000000 --- a/gpu/gosl/process.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
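As a usage sketch for the command-line tool defined above (package and directory names hypothetical, assuming the gosl binary is installed on the PATH), one plausible setup is to drive it from a go:generate directive in the package that owns the GPU code, with the -out and -exclude flags matching the flag definitions above:

package compute

// Running `go generate` here invokes gosl on the current directory,
// writing generated WGSL into ./shaders and skipping the listed Go-only
// methods; the same command can also be run directly from a shell.
//go:generate gosl -out shaders -exclude Update,Defaults .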
- -package main - -import ( - "bytes" - "fmt" - "go/ast" - "go/token" - "io/fs" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "strings" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/gpu" - "cogentcore.org/core/gpu/gosl/alignsl" - "cogentcore.org/core/gpu/gosl/slprint" - "golang.org/x/tools/go/packages" -) - -// does all the file processing -func ProcessFiles(paths []string) (map[string][]byte, error) { - fls := FilesFromPaths(paths) - gosls := ExtractGoFiles(fls) // extract Go files to shader/*.go - - wgslFiles := []string{} - for _, fn := range fls { - if strings.HasSuffix(fn, ".wgsl") { - wgslFiles = append(wgslFiles, fn) - } - } - - pf := "./" + *outDir - pkgs, err := packages.Load(&packages.Config{Mode: packages.NeedName | packages.NeedFiles | packages.NeedTypes | packages.NeedSyntax | packages.NeedTypesSizes | packages.NeedTypesInfo}, pf) - if err != nil { - log.Println(err) - return nil, err - } - if len(pkgs) != 1 { - err := fmt.Errorf("More than one package for path: %v", pf) - log.Println(err) - return nil, err - } - pkg := pkgs[0] - - if len(pkg.GoFiles) == 0 { - err := fmt.Errorf("No Go files found in package: %+v", pkg) - log.Println(err) - return nil, err - } - // fmt.Printf("go files: %+v", pkg.GoFiles) - // return nil, err - - // map of files with a main function that needs to be compiled - needsCompile := map[string]bool{} - - serr := alignsl.CheckPackage(pkg) - if serr != nil { - fmt.Println(serr) - } - - slrandCopied := false - sltypeCopied := false - for fn := range gosls { - gofn := fn + ".go" - if *debug { - fmt.Printf("###################################\nProcessing Go file: %s\n", gofn) - } - - var afile *ast.File - var fpos token.Position - for _, sy := range pkg.Syntax { - pos := pkg.Fset.Position(sy.Package) - _, posfn := filepath.Split(pos.Filename) - if posfn == gofn { - fpos = pos - afile = sy - break - } - } - if afile == nil { - fmt.Printf("Warning: File named: %s not found in processed package\n", gofn) - continue - } - - var buf bytes.Buffer - cfg := slprint.Config{Mode: printerMode, Tabwidth: tabWidth, ExcludeFunctions: excludeFunctionMap} - cfg.Fprint(&buf, pkg, afile) - // ioutil.WriteFile(filepath.Join(*outDir, fn+".tmp"), buf.Bytes(), 0644) - slfix, hasSltype, hasSlrand := SlEdits(buf.Bytes()) - if hasSlrand && !slrandCopied { - hasSltype = true - if *debug { - fmt.Printf("\tcopying slrand.wgsl to shaders\n") - } - CopyPackageFile("slrand.wgsl", "cogentcore.org/core/gpu/gosl/slrand") - slrandCopied = true - } - if hasSltype && !sltypeCopied { - if *debug { - fmt.Printf("\tcopying sltype.wgsl to shaders\n") - } - CopyPackageFile("sltype.wgsl", "cogentcore.org/core/gpu/gosl/sltype") - sltypeCopied = true - } - exsl, hasMain := ExtractWGSL(slfix) - gosls[fn] = exsl - - if hasMain { - needsCompile[fn] = true - } - if !*keepTmp { - os.Remove(fpos.Filename) - } - - // add wgsl code - for _, slfn := range wgslFiles { - if fn+".wgsl" != slfn { - continue - } - buf, err := os.ReadFile(slfn) - if err != nil { - fmt.Println(err) - continue - } - exsl = append(exsl, []byte(fmt.Sprintf("\n// from file: %s\n", slfn))...) - exsl = append(exsl, buf...) 
- gosls[fn] = exsl - needsCompile[fn] = true // assume any standalone has main - break - } - - slfn := filepath.Join(*outDir, fn+".wgsl") - ioutil.WriteFile(slfn, exsl, 0644) - } - - // check for wgsl files that had no go equivalent - for _, slfn := range wgslFiles { - hasGo := false - for fn := range gosls { - if fn+".wgsl" == slfn { - hasGo = true - break - } - } - if hasGo { - continue - } - _, slfno := filepath.Split(slfn) // could be in a subdir - tofn := filepath.Join(*outDir, slfno) - CopyFile(slfn, tofn) - fn := strings.TrimSuffix(slfno, ".wgsl") - needsCompile[fn] = true // assume any standalone wgsl is a main - } - - for fn := range needsCompile { - CompileFile(fn + ".wgsl") - } - return gosls, nil -} - -func CompileFile(fn string) error { - dir, _ := filepath.Abs(*outDir) - fsys := os.DirFS(dir) - b, err := fs.ReadFile(fsys, fn) - if errors.Log(err) != nil { - return err - } - is := gpu.IncludeFS(fsys, "", string(b)) - ofn := filepath.Join(dir, fn) - err = os.WriteFile(ofn, []byte(is), 0666) - if errors.Log(err) != nil { - return err - } - cmd := exec.Command("naga", fn) - cmd.Dir = dir - out, err := cmd.CombinedOutput() - fmt.Printf("\n-----------------------------------------------------\nnaga output for: %s\n%s", fn, out) - if err != nil { - log.Println(err) - return err - } - return nil -} diff --git a/gpu/gosl/slbool/README.md b/gpu/gosl/slbool/README.md deleted file mode 100644 index c69345a415..0000000000 --- a/gpu/gosl/slbool/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# slbool - -`slbool` defines a WGSL and Go friendly `int32` Bool type. The standard WGSL bool type causes obscure errors, and the int32 obeys the 4 byte basic alignment requirements. - -`gosl` automatically converts this Go code into appropriate WGSL code. - - diff --git a/gpu/gosl/slbool/slbool.go b/gpu/gosl/slbool/slbool.go deleted file mode 100644 index b277fa8a14..0000000000 --- a/gpu/gosl/slbool/slbool.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2019, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -package slbool defines a WGSL friendly int32 Bool type. -The standard WGSL bool type causes obscure errors, -and the int32 obeys the 4 byte basic alignment requirements. - -gosl automatically converts this Go code into appropriate WGSL code. -*/ -package slbool - -// Bool is an WGSL friendly int32 Bool type. 
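The slbool README above explains why a plain WGSL bool is avoided. A small hedged sketch of the intended Go-side usage follows (the struct and method here are hypothetical); per the sledits replacement table later in this diff, gosl rewrites calls such as .IsTrue() into plain integer comparisons in the generated WGSL, so the same source can serve both sides.

package main

import (
	"fmt"

	"cogentcore.org/core/gpu/gosl/slbool"
)

// Params is a hypothetical GPU-visible struct: slbool.Bool is an int32
// underneath, so it satisfies the 4-byte alignment noted in the README.
type Params struct {
	Learn slbool.Bool
}

// Gain returns 2 when learning is on; in the generated WGSL, gosl would
// replace Learn.IsTrue() with an integer comparison against 1.
func (pr *Params) Gain() float32 {
	if pr.Learn.IsTrue() {
		return 2
	}
	return 1
}

func main() {
	var pr Params
	pr.Learn.SetBool(true)
	fmt.Println(pr.Gain()) // prints: 2
}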
-type Bool int32 - -const ( - // False is the [Bool] false value - False Bool = 0 - // True is the [Bool] true value - True Bool = 1 -) - -// Bool returns the Bool as a standard Go bool -func (b Bool) Bool() bool { - return b == True -} - -// IsTrue returns whether the bool is true -func (b Bool) IsTrue() bool { - return b == True -} - -// IsFalse returns whether the bool is false -func (b Bool) IsFalse() bool { - return b == False -} - -// SetBool sets the Bool from a standard Go bool -func (b *Bool) SetBool(bb bool) { - *b = FromBool(bb) -} - -// String returns the bool as a string ("true"/"false") -func (b Bool) String() string { - if b.IsTrue() { - return "true" - } - return "false" -} - -// FromString sets the bool from the given string -func (b *Bool) FromString(s string) { - if s == "true" || s == "True" { - b.SetBool(true) - } else { - b.SetBool(false) - } - -} - -// MarshalText implements the [encoding/text.Marshaler] interface -func (b Bool) MarshalText() ([]byte, error) { return []byte(b.String()), nil } - -// UnmarshalText implements the [encoding/text.Unmarshaler] interface -func (b *Bool) UnmarshalText(s []byte) error { b.FromString(string(s)); return nil } - -// IsTrue returns whether the given bool is true -func IsTrue(b Bool) bool { - return b == True -} - -// IsFalse returns whether the given bool is false -func IsFalse(b Bool) bool { - return b == False -} - -// FromBool returns the given Go bool as a [Bool] -func FromBool(b bool) Bool { - if b { - return True - } - return False -} diff --git a/gpu/gosl/slbool/slboolcore/slboolcore.go b/gpu/gosl/slbool/slboolcore/slboolcore.go deleted file mode 100644 index a9d44c489a..0000000000 --- a/gpu/gosl/slbool/slboolcore/slboolcore.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2023, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slboolcore - -import ( - "cogentcore.org/core/core" - "cogentcore.org/core/gpu/gosl/slbool" -) - -func init() { - core.AddValueType[slbool.Bool, core.Switch]() -} diff --git a/gpu/gosl/sledits.go b/gpu/gosl/sledits.go deleted file mode 100644 index b9ea5e2318..0000000000 --- a/gpu/gosl/sledits.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2024 Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bytes" -) - -// MoveLines moves the st,ed region to 'to' line -func MoveLines(lines *[][]byte, to, st, ed int) { - mvln := (*lines)[st:ed] - btwn := (*lines)[to:st] - aft := (*lines)[ed:len(*lines)] - nln := make([][]byte, to, len(*lines)) - copy(nln, (*lines)[:to]) - nln = append(nln, mvln...) - nln = append(nln, btwn...) - nln = append(nln, aft...) - *lines = nln -} - -// SlEdits performs post-generation edits for wgsl -// * moves wgsl segments around, e.g., methods -// into their proper classes -// * fixes printf, slice other common code -// returns true if a slrand. or sltype. prefix was found, -// driveing copying of those files. 
-func SlEdits(src []byte) ([]byte, bool, bool) { - // return src // uncomment to show original without edits - nl := []byte("\n") - lines := bytes.Split(src, nl) - hasSlrand, hasSltype := SlEditsReplace(lines) - - return bytes.Join(lines, nl), hasSlrand, hasSltype -} - -type Replace struct { - From, To []byte -} - -var Replaces = []Replace{ - {[]byte("sltype.Uint32Vec2"), []byte("vec2")}, - {[]byte("sltype.Float32Vec2"), []byte("vec2")}, - {[]byte("float32"), []byte("f32")}, - {[]byte("float64"), []byte("f64")}, // TODO: not yet supported - {[]byte("uint32"), []byte("u32")}, - {[]byte("uint64"), []byte("su64")}, - {[]byte("int32"), []byte("i32")}, - {[]byte("math32.FastExp("), []byte("FastExp(")}, // FastExp about same speed, numerically identical - // {[]byte("math32.FastExp("), []byte("exp(")}, // exp is slightly faster it seems - {[]byte("math.Float32frombits("), []byte("bitcast(")}, - {[]byte("math.Float32bits("), []byte("bitcast(")}, - {[]byte("shaders."), []byte("")}, - {[]byte("slrand."), []byte("Rand")}, - {[]byte("RandUi32"), []byte("RandUint32")}, // fix int32 -> i32 - {[]byte(".SetFromVector2("), []byte("=(")}, - {[]byte(".SetFrom2("), []byte("=(")}, - {[]byte(".IsTrue()"), []byte("==1")}, - {[]byte(".IsFalse()"), []byte("==0")}, - {[]byte(".SetBool(true)"), []byte("=1")}, - {[]byte(".SetBool(false)"), []byte("=0")}, - {[]byte(".SetBool("), []byte("=i32(")}, - {[]byte("slbool.Bool"), []byte("i32")}, - {[]byte("slbool.True"), []byte("1")}, - {[]byte("slbool.False"), []byte("0")}, - {[]byte("slbool.IsTrue("), []byte("(1 == ")}, - {[]byte("slbool.IsFalse("), []byte("(0 == ")}, - {[]byte("slbool.FromBool("), []byte("i32(")}, - {[]byte("bools.ToFloat32("), []byte("f32(")}, - {[]byte("bools.FromFloat32("), []byte("bool(")}, - {[]byte("num.FromBool[f32]("), []byte("f32(")}, - {[]byte("num.ToBool("), []byte("bool(")}, - // todo: do this conversion in nodes only for correct types - // {[]byte(".X"), []byte(".x")}, - // {[]byte(".Y"), []byte(".y")}, - // {[]byte(".Z"), []byte(".z")}, - // {[]byte(""), []byte("")}, - // {[]byte(""), []byte("")}, - // {[]byte(""), []byte("")}, -} - -func MathReplaceAll(mat, ln []byte) []byte { - ml := len(mat) - st := 0 - for { - sln := ln[st:] - i := bytes.Index(sln, mat) - if i < 0 { - return ln - } - fl := ln[st+i+ml : st+i+ml+1] - dl := bytes.ToLower(fl) - el := ln[st+i+ml+1:] - ln = append(ln[:st+i], dl...) - ln = append(ln, el...) - st += i + 1 - } -} - -// SlEditsReplace replaces Go with equivalent WGSL code -// returns true if has slrand. or sltype. -// to auto include that header file if so. 
-func SlEditsReplace(lines [][]byte) (bool, bool) { - mt32 := []byte("math32.") - mth := []byte("math.") - slr := []byte("slrand.") - styp := []byte("sltype.") - include := []byte("#include") - hasSlrand := false - hasSltype := false - for li, ln := range lines { - if bytes.Contains(ln, include) { - continue - } - for _, r := range Replaces { - if !hasSlrand && bytes.Contains(ln, slr) { - hasSlrand = true - } - if !hasSltype && bytes.Contains(ln, styp) { - hasSltype = true - } - ln = bytes.ReplaceAll(ln, r.From, r.To) - } - ln = MathReplaceAll(mt32, ln) - ln = MathReplaceAll(mth, ln) - lines[li] = ln - } - return hasSlrand, hasSltype -} - -var SLBools = []Replace{ - {[]byte(".IsTrue()"), []byte("==1")}, - {[]byte(".IsFalse()"), []byte("==0")}, - {[]byte(".SetBool(true)"), []byte("=1")}, - {[]byte(".SetBool(false)"), []byte("=0")}, - {[]byte(".SetBool("), []byte("=int32(")}, - {[]byte("slbool.Bool"), []byte("int32")}, - {[]byte("slbool.True"), []byte("1")}, - {[]byte("slbool.False"), []byte("0")}, - {[]byte("slbool.IsTrue("), []byte("(1 == ")}, - {[]byte("slbool.IsFalse("), []byte("(0 == ")}, - {[]byte("slbool.FromBool("), []byte("int32(")}, - {[]byte("bools.ToFloat32("), []byte("float32(")}, - {[]byte("bools.FromFloat32("), []byte("bool(")}, - {[]byte("num.FromBool[f32]("), []byte("float32(")}, - {[]byte("num.ToBool("), []byte("bool(")}, -} - -// SlBoolReplace replaces all the slbool methods with literal int32 expressions. -func SlBoolReplace(lines [][]byte) { - for li, ln := range lines { - for _, r := range SLBools { - ln = bytes.ReplaceAll(ln, r.From, r.To) - } - lines[li] = ln - } -} diff --git a/gpu/gosl/slprint/comment.go b/gpu/gosl/slprint/comment.go deleted file mode 100644 index f97a9a2084..0000000000 --- a/gpu/gosl/slprint/comment.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slprint - -import ( - "go/ast" - "go/doc/comment" - "strings" -) - -// formatDocComment reformats the doc comment list, -// returning the canonical formatting. -func formatDocComment(list []*ast.Comment) []*ast.Comment { - // Extract comment text (removing comment markers). - var kind, text string - var directives []*ast.Comment - if len(list) == 1 && strings.HasPrefix(list[0].Text, "/*") { - kind = "/*" - text = list[0].Text - if !strings.Contains(text, "\n") || allStars(text) { - // Single-line /* .. */ comment in doc comment position, - // or multiline old-style comment like - // /* - // * Comment - // * text here. - // */ - // Should not happen, since it will not work well as a - // doc comment, but if it does, just ignore: - // reformatting it will only make the situation worse. - return list - } - text = text[2 : len(text)-2] // cut /* and */ - } else if strings.HasPrefix(list[0].Text, "//") { - kind = "//" - var b strings.Builder - for _, c := range list { - after, found := strings.CutPrefix(c.Text, "//") - if !found { - return list - } - // Accumulate //go:build etc lines separately. - if isDirective(after) { - directives = append(directives, c) - continue - } - b.WriteString(strings.TrimPrefix(after, " ")) - b.WriteString("\n") - } - text = b.String() - } else { - // Not sure what this is, so leave alone. - return list - } - - if text == "" { - return list - } - - // Parse comment and reformat as text. 
- var p comment.Parser - d := p.Parse(text) - - var pr comment.Printer - text = string(pr.Comment(d)) - - // For /* */ comment, return one big comment with text inside. - slash := list[0].Slash - if kind == "/*" { - c := &ast.Comment{ - Slash: slash, - Text: "/*\n" + text + "*/", - } - return []*ast.Comment{c} - } - - // For // comment, return sequence of // lines. - var out []*ast.Comment - for text != "" { - var line string - line, text, _ = strings.Cut(text, "\n") - if line == "" { - line = "//" - } else if strings.HasPrefix(line, "\t") { - line = "//" + line - } else { - line = "// " + line - } - out = append(out, &ast.Comment{ - Slash: slash, - Text: line, - }) - } - if len(directives) > 0 { - out = append(out, &ast.Comment{ - Slash: slash, - Text: "//", - }) - for _, c := range directives { - out = append(out, &ast.Comment{ - Slash: slash, - Text: c.Text, - }) - } - } - return out -} - -// isDirective reports whether c is a comment directive. -// See go.dev/issue/37974. -// This code is also in go/ast. -func isDirective(c string) bool { - // "//line " is a line directive. - // "//extern " is for gccgo. - // "//export " is for cgo. - // (The // has been removed.) - if strings.HasPrefix(c, "line ") || strings.HasPrefix(c, "extern ") || strings.HasPrefix(c, "export ") { - return true - } - - // "//[a-z0-9]+:[a-z0-9]" - // (The // has been removed.) - colon := strings.Index(c, ":") - if colon <= 0 || colon+1 >= len(c) { - return false - } - for i := 0; i <= colon+1; i++ { - if i == colon { - continue - } - b := c[i] - if !('a' <= b && b <= 'z' || '0' <= b && b <= '9') { - return false - } - } - return true -} - -// allStars reports whether text is the interior of an -// old-style /* */ comment with a star at the start of each line. -func allStars(text string) bool { - for i := 0; i < len(text); i++ { - if text[i] == '\n' { - j := i + 1 - for j < len(text) && (text[j] == ' ' || text[j] == '\t') { - j++ - } - if j < len(text) && text[j] != '*' { - return false - } - } - } - return true -} diff --git a/gpu/gosl/slprint/gobuild.go b/gpu/gosl/slprint/gobuild.go deleted file mode 100644 index fd0c4b4002..0000000000 --- a/gpu/gosl/slprint/gobuild.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slprint - -import ( - "go/build/constraint" - "sort" - "text/tabwriter" -) - -func (p *printer) fixGoBuildLines() { - if len(p.goBuild)+len(p.plusBuild) == 0 { - return - } - - // Find latest possible placement of //go:build and // +build comments. - // That's just after the last blank line before we find a non-comment. - // (We'll add another blank line after our comment block.) - // When we start dropping // +build comments, we can skip over /* */ comments too. - // Note that we are processing tabwriter input, so every comment - // begins and ends with a tabwriter.Escape byte. - // And some newlines have turned into \f bytes. - insert := 0 - for pos := 0; ; { - // Skip leading space at beginning of line. - blank := true - for pos < len(p.output) && (p.output[pos] == ' ' || p.output[pos] == '\t') { - pos++ - } - // Skip over // comment if any. - if pos+3 < len(p.output) && p.output[pos] == tabwriter.Escape && p.output[pos+1] == '/' && p.output[pos+2] == '/' { - blank = false - for pos < len(p.output) && !isNL(p.output[pos]) { - pos++ - } - } - // Skip over \n at end of line. 
- if pos >= len(p.output) || !isNL(p.output[pos]) { - break - } - pos++ - - if blank { - insert = pos - } - } - - // If there is a //go:build comment before the place we identified, - // use that point instead. (Earlier in the file is always fine.) - if len(p.goBuild) > 0 && p.goBuild[0] < insert { - insert = p.goBuild[0] - } else if len(p.plusBuild) > 0 && p.plusBuild[0] < insert { - insert = p.plusBuild[0] - } - - var x constraint.Expr - switch len(p.goBuild) { - case 0: - // Synthesize //go:build expression from // +build lines. - for _, pos := range p.plusBuild { - y, err := constraint.Parse(p.commentTextAt(pos)) - if err != nil { - x = nil - break - } - if x == nil { - x = y - } else { - x = &constraint.AndExpr{X: x, Y: y} - } - } - case 1: - // Parse //go:build expression. - x, _ = constraint.Parse(p.commentTextAt(p.goBuild[0])) - } - - var block []byte - if x == nil { - // Don't have a valid //go:build expression to treat as truth. - // Bring all the lines together but leave them alone. - // Note that these are already tabwriter-escaped. - for _, pos := range p.goBuild { - block = append(block, p.lineAt(pos)...) - } - for _, pos := range p.plusBuild { - block = append(block, p.lineAt(pos)...) - } - } else { - block = append(block, tabwriter.Escape) - block = append(block, "//go:build "...) - block = append(block, x.String()...) - block = append(block, tabwriter.Escape, '\n') - if len(p.plusBuild) > 0 { - lines, err := constraint.PlusBuildLines(x) - if err != nil { - lines = []string{"// +build error: " + err.Error()} - } - for _, line := range lines { - block = append(block, tabwriter.Escape) - block = append(block, line...) - block = append(block, tabwriter.Escape, '\n') - } - } - } - block = append(block, '\n') - - // Build sorted list of lines to delete from remainder of output. - toDelete := append(p.goBuild, p.plusBuild...) - sort.Ints(toDelete) - - // Collect output after insertion point, with lines deleted, into after. - var after []byte - start := insert - for _, end := range toDelete { - if end < start { - continue - } - after = appendLines(after, p.output[start:end]) - start = end + len(p.lineAt(end)) - } - after = appendLines(after, p.output[start:]) - if n := len(after); n >= 2 && isNL(after[n-1]) && isNL(after[n-2]) { - after = after[:n-1] - } - - p.output = p.output[:insert] - p.output = append(p.output, block...) - p.output = append(p.output, after...) -} - -// appendLines is like append(x, y...) -// but it avoids creating doubled blank lines, -// which would not be gofmt-standard output. -// It assumes that only whole blocks of lines are being appended, -// not line fragments. -func appendLines(x, y []byte) []byte { - if len(y) > 0 && isNL(y[0]) && // y starts in blank line - (len(x) == 0 || len(x) >= 2 && isNL(x[len(x)-1]) && isNL(x[len(x)-2])) { // x is empty or ends in blank line - y = y[1:] // delete y's leading blank line - } - return append(x, y...) 
-} - -func (p *printer) lineAt(start int) []byte { - pos := start - for pos < len(p.output) && !isNL(p.output[pos]) { - pos++ - } - if pos < len(p.output) { - pos++ - } - return p.output[start:pos] -} - -func (p *printer) commentTextAt(start int) string { - if start < len(p.output) && p.output[start] == tabwriter.Escape { - start++ - } - pos := start - for pos < len(p.output) && p.output[pos] != tabwriter.Escape && !isNL(p.output[pos]) { - pos++ - } - return string(p.output[start:pos]) -} - -func isNL(b byte) bool { - return b == '\n' || b == '\f' -} diff --git a/gpu/gosl/slprint/nodes.go b/gpu/gosl/slprint/nodes.go deleted file mode 100644 index 13ace493a0..0000000000 --- a/gpu/gosl/slprint/nodes.go +++ /dev/null @@ -1,2534 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements printing of AST nodes; specifically -// expressions, statements, declarations, and files. It uses -// the print functionality implemented in printer.go. - -package slprint - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "math" - "path" - "slices" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// Formatting issues: -// - better comment formatting for /*-style comments at the end of a line (e.g. a declaration) -// when the comment spans multiple lines; if such a comment is just two lines, formatting is -// not idempotent -// - formatting of expression lists -// - should use blank instead of tab to separate one-line function bodies from -// the function header unless there is a group of consecutive one-liners - -// ---------------------------------------------------------------------------- -// Common AST nodes. - -// Print as many newlines as necessary (but at least min newlines) to get to -// the current line. ws is printed before the first line break. If newSection -// is set, the first line break is printed as formfeed. Returns 0 if no line -// breaks were printed, returns 1 if there was exactly one newline printed, -// and returns a value > 1 if there was a formfeed or more than one newline -// printed. -// -// TODO(gri): linebreak may add too many lines if the next statement at "line" -// is preceded by comments because the computation of n assumes -// the current position before the comment and the target position -// after the comment. Thus, after interspersing such comments, the -// space taken up by them is not considered to reduce the number of -// linebreaks. At the moment there is no easy way to know about -// future (not yet interspersed) comments in this function. -func (p *printer) linebreak(line, min int, ws whiteSpace, newSection bool) (nbreaks int) { - n := max(nlimit(line-p.pos.Line), min) - if n > 0 { - p.print(ws) - if newSection { - p.print(formfeed) - n-- - nbreaks = 2 - } - nbreaks += n - for ; n > 0; n-- { - p.print(newline) - } - } - return -} - -// setComment sets g as the next comment if g != nil and if node comments -// are enabled - this mode is used when printing source code fragments such -// as exports only. It assumes that there is no pending comment in p.comments -// and at most one pending comment in the p.comment cache. 
-func (p *printer) setComment(g *ast.CommentGroup) { - if g == nil || !p.useNodeComments { - return - } - if p.comments == nil { - // initialize p.comments lazily - p.comments = make([]*ast.CommentGroup, 1) - } else if p.cindex < len(p.comments) { - // for some reason there are pending comments; this - // should never happen - handle gracefully and flush - // all comments up to g, ignore anything after that - p.flush(p.posFor(g.List[0].Pos()), token.ILLEGAL) - p.comments = p.comments[0:1] - // in debug mode, report error - p.internalError("setComment found pending comments") - } - p.comments[0] = g - p.cindex = 0 - // don't overwrite any pending comment in the p.comment cache - // (there may be a pending comment when a line comment is - // immediately followed by a lead comment with no other - // tokens between) - if p.commentOffset == infinity { - p.nextComment() // get comment ready for use - } -} - -type exprListMode uint - -const ( - commaTerm exprListMode = 1 << iota // list is optionally terminated by a comma - noIndent // no extra indentation in multi-line lists -) - -// If indent is set, a multi-line identifier list is indented after the -// first linebreak encountered. -func (p *printer) identList(list []*ast.Ident, indent bool) { - // convert into an expression list so we can re-use exprList formatting - xlist := make([]ast.Expr, len(list)) - for i, x := range list { - xlist[i] = x - } - var mode exprListMode - if !indent { - mode = noIndent - } - p.exprList(token.NoPos, xlist, 1, mode, token.NoPos, false) -} - -const filteredMsg = "contains filtered or unexported fields" - -// Print a list of expressions. If the list spans multiple -// source lines, the original line breaks are respected between -// expressions. -// -// TODO(gri) Consider rewriting this to be independent of []ast.Expr -// so that we can use the algorithm for any kind of list -// -// (e.g., pass list via a channel over which to range). -func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exprListMode, next0 token.Pos, isIncomplete bool) { - if len(list) == 0 { - if isIncomplete { - prev := p.posFor(prev0) - next := p.posFor(next0) - if prev.IsValid() && prev.Line == next.Line { - p.print("/* " + filteredMsg + " */") - } else { - p.print(newline) - p.print(indent, "// "+filteredMsg, unindent, newline) - } - } - return - } - - prev := p.posFor(prev0) - next := p.posFor(next0) - line := p.lineFor(list[0].Pos()) - endLine := p.lineFor(list[len(list)-1].End()) - - if prev.IsValid() && prev.Line == line && line == endLine { - // all list entries on a single line - for i, x := range list { - if i > 0 { - // use position of expression following the comma as - // comma position for correct comment placement - p.setPos(x.Pos()) - p.print(token.COMMA, blank) - } - p.expr0(x, depth) - } - if isIncomplete { - p.print(token.COMMA, blank, "/* "+filteredMsg+" */") - } - return - } - - // list entries span multiple lines; - // use source code positions to guide line breaks - - // Don't add extra indentation if noIndent is set; - // i.e., pretend that the first line is already indented. - ws := ignore - if mode&noIndent == 0 { - ws = indent - } - - // The first linebreak is always a formfeed since this section must not - // depend on any previous formatting. 
- prevBreak := -1 // index of last expression that was followed by a linebreak - if prev.IsValid() && prev.Line < line && p.linebreak(line, 0, ws, true) > 0 { - ws = ignore - prevBreak = 0 - } - - // initialize expression/key size: a zero value indicates expr/key doesn't fit on a single line - size := 0 - - // We use the ratio between the geometric mean of the previous key sizes and - // the current size to determine if there should be a break in the alignment. - // To compute the geometric mean we accumulate the ln(size) values (lnsum) - // and the number of sizes included (count). - lnsum := 0.0 - count := 0 - - // print all list elements - prevLine := prev.Line - for i, x := range list { - line = p.lineFor(x.Pos()) - - // Determine if the next linebreak, if any, needs to use formfeed: - // in general, use the entire node size to make the decision; for - // key:value expressions, use the key size. - // TODO(gri) for a better result, should probably incorporate both - // the key and the node size into the decision process - useFF := true - - // Determine element size: All bets are off if we don't have - // position information for the previous and next token (likely - // generated code - simply ignore the size in this case by setting - // it to 0). - prevSize := size - const infinity = 1e6 // larger than any source line - size = p.nodeSize(x, infinity) - pair, isPair := x.(*ast.KeyValueExpr) - if size <= infinity && prev.IsValid() && next.IsValid() { - // x fits on a single line - if isPair { - size = p.nodeSize(pair.Key, infinity) // size <= infinity - } - } else { - // size too large or we don't have good layout information - size = 0 - } - - // If the previous line and the current line had single- - // line-expressions and the key sizes are small or the - // ratio between the current key and the geometric mean - // if the previous key sizes does not exceed a threshold, - // align columns and do not use formfeed. - if prevSize > 0 && size > 0 { - const smallSize = 40 - if count == 0 || prevSize <= smallSize && size <= smallSize { - useFF = false - } else { - const r = 2.5 // threshold - geomean := math.Exp(lnsum / float64(count)) // count > 0 - ratio := float64(size) / geomean - useFF = r*ratio <= 1 || r <= ratio - } - } - - needsLinebreak := 0 < prevLine && prevLine < line - if i > 0 { - // Use position of expression following the comma as - // comma position for correct comment placement, but - // only if the expression is on the same line. - if !needsLinebreak { - p.setPos(x.Pos()) - } - p.print(token.COMMA) - needsBlank := true - if needsLinebreak { - // Lines are broken using newlines so comments remain aligned - // unless useFF is set or there are multiple expressions on - // the same line in which case formfeed is used. - nbreaks := p.linebreak(line, 0, ws, useFF || prevBreak+1 < i) - if nbreaks > 0 { - ws = ignore - prevBreak = i - needsBlank = false // we got a line break instead - } - // If there was a new section or more than one new line - // (which means that the tabwriter will implicitly break - // the section), reset the geomean variables since we are - // starting a new group of elements with the next element. - if nbreaks > 1 { - lnsum = 0 - count = 0 - } - } - if needsBlank { - p.print(blank) - } - } - - if len(list) > 1 && isPair && size > 0 && needsLinebreak { - // We have a key:value expression that fits onto one line - // and it's not on the same line as the prior expression: - // Use a column for the key such that consecutive entries - // can align if possible. 
- // (needsLinebreak is set if we started a new line before) - p.expr(pair.Key) - p.setPos(pair.Colon) - p.print(token.COLON, vtab) - p.expr(pair.Value) - } else { - p.expr0(x, depth) - } - - if size > 0 { - lnsum += math.Log(float64(size)) - count++ - } - - prevLine = line - } - - if mode&commaTerm != 0 && next.IsValid() && p.pos.Line < next.Line { - // Print a terminating comma if the next token is on a new line. - p.print(token.COMMA) - if isIncomplete { - p.print(newline) - p.print("// " + filteredMsg) - } - if ws == ignore && mode&noIndent == 0 { - // unindent if we indented - p.print(unindent) - } - p.print(formfeed) // terminating comma needs a line break to look good - return - } - - if isIncomplete { - p.print(token.COMMA, newline) - p.print("// "+filteredMsg, newline) - } - - if ws == ignore && mode&noIndent == 0 { - // unindent if we indented - p.print(unindent) - } -} - -type paramMode int - -const ( - funcParam paramMode = iota - funcTParam - typeTParam -) - -func (p *printer) parameters(fields *ast.FieldList, mode paramMode) { - openTok, closeTok := token.LPAREN, token.RPAREN - if mode != funcParam { - openTok, closeTok = token.LBRACK, token.RBRACK - } - p.setPos(fields.Opening) - p.print(openTok) - if len(fields.List) > 0 { - prevLine := p.lineFor(fields.Opening) - ws := indent - for i, par := range fields.List { - // determine par begin and end line (may be different - // if there are multiple parameter names for this par - // or the type is on a separate line) - parLineBeg := p.lineFor(par.Pos()) - parLineEnd := p.lineFor(par.End()) - // separating "," if needed - needsLinebreak := 0 < prevLine && prevLine < parLineBeg - if i > 0 { - // use position of parameter following the comma as - // comma position for correct comma placement, but - // only if the next parameter is on the same line - if !needsLinebreak { - p.setPos(par.Pos()) - } - p.print(token.COMMA) - } - // separator if needed (linebreak or blank) - if needsLinebreak && p.linebreak(parLineBeg, 0, ws, true) > 0 { - // break line if the opening "(" or previous parameter ended on a different line - ws = ignore - } else if i > 0 { - p.print(blank) - } - // parameter names - if len(par.Names) > 1 { - nnm := len(par.Names) - for ni, nm := range par.Names { - p.print(nm.Name) - p.print(token.COLON) - p.print(blank) - atyp, isPtr := p.ptrType(stripParensAlways(par.Type)) - p.expr(atyp) - if isPtr { - p.print(">") - p.curPtrArgs = append(p.curPtrArgs, par.Names[0]) - } - if ni < nnm-1 { - p.print(token.COMMA) - } - } - } else if len(par.Names) > 0 { - // Very subtle: If we indented before (ws == ignore), identList - // won't indent again. If we didn't (ws == indent), identList will - // indent if the identList spans multiple lines, and it will outdent - // again at the end (and still ws == indent). Thus, a subsequent indent - // by a linebreak call after a type, or in the next multi-line identList - // will do the right thing. 
- p.identList(par.Names, ws == indent) - p.print(token.COLON) - p.print(blank) - // parameter type -- gosl = type first, replace ptr star with `inout` - atyp, isPtr := p.ptrType(stripParensAlways(par.Type)) - p.expr(atyp) - if isPtr { - p.print(">") - p.curPtrArgs = append(p.curPtrArgs, par.Names[0]) - } - } else { - atyp, isPtr := p.ptrType(stripParensAlways(par.Type)) - p.expr(atyp) - if isPtr { - p.print(">") - } - } - prevLine = parLineEnd - } - - // if the closing ")" is on a separate line from the last parameter, - // print an additional "," and line break - if closing := p.lineFor(fields.Closing); 0 < prevLine && prevLine < closing { - p.print(token.COMMA) - p.linebreak(closing, 0, ignore, true) - } else if mode == typeTParam && fields.NumFields() == 1 && combinesWithName(fields.List[0].Type) { - // A type parameter list [P T] where the name P and the type expression T syntactically - // combine to another valid (value) expression requires a trailing comma, as in [P *T,] - // (or an enclosing interface as in [P interface(*T)]), so that the type parameter list - // is not parsed as an array length [P*T]. - p.print(token.COMMA) - } - - // unindent if we indented - if ws == ignore { - p.print(unindent) - } - } - - p.setPos(fields.Closing) - p.print(closeTok) -} - -// gosl: ensure basic literals are properly cast -func (p *printer) matchLiteralArgs(args []ast.Expr, params *types.Tuple) []ast.Expr { - ags := slices.Clone(args) - mx := min(len(args), params.Len()) - for i := 0; i < mx; i++ { - ag := args[i] - pr := params.At(i) - lit, ok := ag.(*ast.BasicLit) - if !ok { - continue - } - typ := pr.Type() - tnm := getLocalTypeName(typ) - nn := normalizedNumber(lit) - nn.Value = tnm + "(" + nn.Value + ")" - ags[i] = nn - } - return ags -} - -// gosl: ensure basic literals are properly cast -func (p *printer) matchLiteralType(x ast.Expr, typ *ast.Ident) bool { - if lit, ok := x.(*ast.BasicLit); ok { - p.print(typ.Name, token.LPAREN, normalizedNumber(lit), token.RPAREN) - return true - } - return false -} - -// gosl: ensure basic literals are properly cast -func (p *printer) matchAssignType(lhs []ast.Expr, rhs []ast.Expr) bool { - if len(rhs) != 1 || len(lhs) != 1 { - return false - } - val := "" - lit, ok := rhs[0].(*ast.BasicLit) - if ok { - val = normalizedNumber(lit).Value - } else { - un, ok := rhs[0].(*ast.UnaryExpr) - if !ok || un.Op != token.SUB { - return false - } - lit, ok = un.X.(*ast.BasicLit) - if !ok { - return false - } - val = "-" + normalizedNumber(lit).Value - } - var err error - var typ types.Type - if id, ok := lhs[0].(*ast.Ident); ok { - typ = p.getIdType(id) - if typ == nil { - return false - } - } else if sl, ok := lhs[0].(*ast.SelectorExpr); ok { - typ, err = p.pathType(sl) - if err != nil { - return false - } - } else if st, ok := lhs[0].(*ast.StarExpr); ok { - if id, ok := st.X.(*ast.Ident); ok { - typ = p.getIdType(id) - if typ == nil { - return false - } - } - if err != nil { - return false - } - } - if typ == nil { - return false - } - tnm := getLocalTypeName(typ) - if tnm[0] == '*' { - tnm = tnm[1:] - } - p.print(tnm, "(", val, ")") - return true -} - -// gosl: pathType returns the final type for the selector path. -// a.b.c -> sel.X = (a.b) Sel=c -- returns type of c by tracing -// through the path. 
-func (p *printer) pathType(x *ast.SelectorExpr) (types.Type, error) { - var paths []*ast.Ident - cur := x - for { - paths = append(paths, cur.Sel) - if sl, ok := cur.X.(*ast.SelectorExpr); ok { // path is itself a selector - cur = sl - continue - } - if id, ok := cur.X.(*ast.Ident); ok { - paths = append(paths, id) - break - } - return nil, fmt.Errorf("gosl pathType: path not a pure selector path") - } - np := len(paths) - bt, err := getStructType(p.getIdType(paths[np-1])) - if err != nil { - return nil, err - } - for pi := np - 2; pi >= 0; pi-- { - pt := paths[pi] - f := fieldByName(bt, pt.Name) - if f == nil { - return nil, fmt.Errorf("gosl pathType: field not found %q in type: %q:", pt, bt.String()) - } - if pi == 0 { - return f.Type(), nil - } else { - bt, err = getStructType(f.Type()) - if err != nil { - return nil, err - } - } - } - return nil, fmt.Errorf("gosl pathType: path not a pure selector path") -} - -// gosl: check if identifier is a pointer arg -func (p *printer) isPtrArg(id *ast.Ident) bool { - for _, pt := range p.curPtrArgs { - if id.Name == pt.Name { - return true - } - } - return false -} - -// gosl: dereference pointer vals -func (p *printer) derefPtrArgs(x ast.Expr, prec, depth int) { - if id, ok := x.(*ast.Ident); ok { - if p.isPtrArg(id) { - p.print(token.LPAREN, token.MUL, id, token.RPAREN) - } else { - p.expr1(x, prec, depth) - } - } else { - p.expr1(x, prec, depth) - } -} - -// gosl: mark pointer types, returns true if pointer -func (p *printer) ptrType(x ast.Expr) (ast.Expr, bool) { - if u, ok := x.(*ast.StarExpr); ok { - p.print("ptr 0 { - // res != nil - if id, ok := res.List[0].Type.(*ast.Ident); ok { - p.curReturnType = id - } - p.print(blank, "->", blank) - if n == 1 && res.List[0].Names == nil { - // single anonymous res; no ()'s - p.expr(stripParensAlways(res.List[0].Type)) - return - } - p.parameters(res, funcParam) - } -} - -func identListSize(list []*ast.Ident, maxSize int) (size int) { - for i, x := range list { - if i > 0 { - size += len(", ") - } - size += utf8.RuneCountInString(x.Name) - if size >= maxSize { - break - } - } - return -} - -func (p *printer) isOneLineFieldList(list []*ast.Field) bool { - if len(list) != 1 { - return false // allow only one field - } - f := list[0] - if f.Tag != nil || f.Comment != nil { - return false // don't allow tags or comments - } - // only name(s) and type - const maxSize = 30 // adjust as appropriate, this is an approximate value - namesSize := identListSize(f.Names, maxSize) - if namesSize > 0 { - namesSize = 1 // blank between names and types - } - typeSize := p.nodeSize(f.Type, maxSize) - return namesSize+typeSize <= maxSize -} - -func (p *printer) setLineComment(text string) { - p.setComment(&ast.CommentGroup{List: []*ast.Comment{{Slash: token.NoPos, Text: text}}}) -} - -func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) { - lbrace := fields.Opening - list := fields.List - rbrace := fields.Closing - hasComments := isIncomplete || p.commentBefore(p.posFor(rbrace)) - srcIsOneLine := lbrace.IsValid() && rbrace.IsValid() && p.lineFor(lbrace) == p.lineFor(rbrace) - - if !hasComments && srcIsOneLine { - // possibly a one-line struct/interface - if len(list) == 0 { - // no blank between keyword and {} in this case - p.setPos(lbrace) - p.print(token.LBRACE) - p.setPos(rbrace) - p.print(token.RBRACE) - return - } else if p.isOneLineFieldList(list) { - // small enough - print on one line - // (don't use identList and ignore source line breaks) - p.setPos(lbrace) - p.print(token.LBRACE, 
blank) - f := list[0] - if isStruct { - for i, x := range f.Names { - if i > 0 { - // no comments so no need for comma position - p.print(token.COMMA, blank) - } - p.expr(x) - } - p.print(token.COLON) - if len(f.Names) > 0 { - p.print(blank) - } - p.expr(f.Type) - } else { // interface - if len(f.Names) > 0 { - name := f.Names[0] // method name - p.expr(name) - p.print(token.COLON) - p.signature(f.Type.(*ast.FuncType), nil) // don't print "func" - } else { - // embedded interface - p.expr(f.Type) - } - } - p.print(blank) - p.setPos(rbrace) - p.print(token.RBRACE) - return - } - } - // hasComments || !srcIsOneLine - - p.print(blank) - p.setPos(lbrace) - p.print(token.LBRACE, indent) - if hasComments || len(list) > 0 { - p.print(formfeed) - } - - if isStruct { - - sep := vtab - if len(list) == 1 { - sep = blank - } - var line int - for i, f := range list { - if i > 0 { - p.linebreak(p.lineFor(f.Pos()), 1, ignore, p.linesFrom(line) > 0) - } - extraTabs := 0 - p.setComment(f.Doc) - p.recordLine(&line) - if len(f.Names) > 1 { - nnm := len(f.Names) - p.setPos(f.Type.Pos()) - for ni, nm := range f.Names { - p.print(nm.Name) - p.print(token.COLON) - p.print(sep) - p.expr(f.Type) - if ni < nnm-1 { - p.print(token.COMMA) - p.print(formfeed) - } - } - extraTabs = 1 - } else if len(f.Names) > 0 { - // named fields - p.identList(f.Names, false) - p.print(token.COLON) - p.print(sep) - p.expr(f.Type) - extraTabs = 1 - } else { - // anonymous field - p.expr(f.Type) - extraTabs = 2 - } - p.print(token.COMMA) - // if f.Tag != nil { - // if len(f.Names) > 0 && sep == vtab { - // p.print(sep) - // } - // p.print(sep) - // p.expr(f.Tag) - // extraTabs = 0 - // } - if f.Comment != nil { - for ; extraTabs > 0; extraTabs-- { - p.print(sep) - } - p.setComment(f.Comment) - } - } - if isIncomplete { - if len(list) > 0 { - p.print(formfeed) - } - p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment - p.setLineComment("// " + filteredMsg) - } - - } else { // interface - - var line int - var prev *ast.Ident // previous "type" identifier - for i, f := range list { - var name *ast.Ident // first name, or nil - if len(f.Names) > 0 { - name = f.Names[0] - } - if i > 0 { - // don't do a line break (min == 0) if we are printing a list of types - // TODO(gri) this doesn't work quite right if the list of types is - // spread across multiple lines - min := 1 - if prev != nil && name == prev { - min = 0 - } - p.linebreak(p.lineFor(f.Pos()), min, ignore, p.linesFrom(line) > 0) - } - p.setComment(f.Doc) - p.recordLine(&line) - if name != nil { - // method - p.expr(name) - p.signature(f.Type.(*ast.FuncType), nil) // don't print "func" - prev = nil - } else { - // embedded interface - p.expr(f.Type) - prev = nil - } - p.setComment(f.Comment) - } - if isIncomplete { - if len(list) > 0 { - p.print(formfeed) - } - p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment - p.setLineComment("// contains filtered or unexported methods") - } - - } - p.print(unindent, formfeed) - p.setPos(rbrace) - p.print(token.RBRACE) -} - -// ---------------------------------------------------------------------------- -// Expressions - -func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) { - switch e.Op.Precedence() { - case 4: - has4 = true - case 5: - has5 = true - } - - switch l := e.X.(type) { - case *ast.BinaryExpr: - if l.Op.Precedence() < e.Op.Precedence() { - // parens will be inserted. - // pretend this is an *ast.ParenExpr and do nothing. 
- break - } - h4, h5, mp := walkBinary(l) - has4 = has4 || h4 - has5 = has5 || h5 - maxProblem = max(maxProblem, mp) - } - - switch r := e.Y.(type) { - case *ast.BinaryExpr: - if r.Op.Precedence() <= e.Op.Precedence() { - // parens will be inserted. - // pretend this is an *ast.ParenExpr and do nothing. - break - } - h4, h5, mp := walkBinary(r) - has4 = has4 || h4 - has5 = has5 || h5 - maxProblem = max(maxProblem, mp) - - case *ast.StarExpr: - if e.Op == token.QUO { // `*/` - maxProblem = 5 - } - - case *ast.UnaryExpr: - switch e.Op.String() + r.Op.String() { - case "/*", "&&", "&^": - maxProblem = 5 - case "++", "--": - maxProblem = max(maxProblem, 4) - } - } - return -} - -func cutoff(e *ast.BinaryExpr, depth int) int { - has4, has5, maxProblem := walkBinary(e) - if maxProblem > 0 { - return maxProblem + 1 - } - if has4 && has5 { - if depth == 1 { - return 5 - } - return 4 - } - if depth == 1 { - return 6 - } - return 4 -} - -func diffPrec(expr ast.Expr, prec int) int { - x, ok := expr.(*ast.BinaryExpr) - if !ok || prec != x.Op.Precedence() { - return 1 - } - return 0 -} - -func reduceDepth(depth int) int { - depth-- - if depth < 1 { - depth = 1 - } - return depth -} - -// Format the binary expression: decide the cutoff and then format. -// Let's call depth == 1 Normal mode, and depth > 1 Compact mode. -// (Algorithm suggestion by Russ Cox.) -// -// The precedences are: -// -// 5 * / % << >> & &^ -// 4 + - | ^ -// 3 == != < <= > >= -// 2 && -// 1 || -// -// The only decision is whether there will be spaces around levels 4 and 5. -// There are never spaces at level 6 (unary), and always spaces at levels 3 and below. -// -// To choose the cutoff, look at the whole expression but excluding primary -// expressions (function calls, parenthesized exprs), and apply these rules: -// -// 1. If there is a binary operator with a right side unary operand -// that would clash without a space, the cutoff must be (in order): -// -// /* 6 -// && 6 -// &^ 6 -// ++ 5 -// -- 5 -// -// (Comparison operators always have spaces around them.) -// -// 2. If there is a mix of level 5 and level 4 operators, then the cutoff -// is 5 (use spaces to distinguish precedence) in Normal mode -// and 4 (never use spaces) in Compact mode. -// -// 3. If there are no level 4 operators or no level 5 operators, then the -// cutoff is 6 (always use spaces) in Normal mode -// and 4 (never use spaces) in Compact mode. -func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) { - prec := x.Op.Precedence() - if prec < prec1 { - // parenthesis needed - // Note: The parser inserts an ast.ParenExpr node; thus this case - // can only occur if the AST is created in a different way. - p.print(token.LPAREN) - p.expr0(x, reduceDepth(depth)) // parentheses undo one level of depth - p.print(token.RPAREN) - return - } - - printBlank := prec < cutoff - - ws := indent - p.expr1(x.X, prec, depth+diffPrec(x.X, prec)) - if printBlank { - p.print(blank) - } - xline := p.pos.Line // before the operator (it may be on the next line!) 
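Since this file is a modified copy of go/printer, the stock formatter exhibits the same operator-spacing rules spelled out in the comment above; a quick runnable check using go/format:

package main

import (
	"fmt"
	"go/format"
)

func main() {
	// Rule 2: mixing precedence 5 (*) with precedence 4 (+) drops the blanks
	// around '*'; rule 3: a single precedence level keeps blanks everywhere.
	src := []byte("package p\n\nvar x = a * b + c\nvar y = a + b\nvar z = a * b\n")
	out, err := format.Source(src)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// var x = a*b + c
	// var y = a + b
	// var z = a * b
}
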
- yline := p.lineFor(x.Y.Pos()) - p.setPos(x.OpPos) - p.print(x.Op) - if xline != yline && xline > 0 && yline > 0 { - // at least one line break, but respect an extra empty line - // in the source - if p.linebreak(yline, 1, ws, true) > 0 { - ws = ignore - printBlank = false // no blank after line break - } - } - if printBlank { - p.print(blank) - } - p.expr1(x.Y, prec+1, depth+1) - if ws == ignore { - p.print(unindent) - } -} - -func isBinary(expr ast.Expr) bool { - _, ok := expr.(*ast.BinaryExpr) - return ok -} - -func (p *printer) expr1(expr ast.Expr, prec1, depth int) { - p.setPos(expr.Pos()) - - switch x := expr.(type) { - case *ast.BadExpr: - p.print("BadExpr") - - case *ast.Ident: - if x.Name == "int" { - p.print("i32") - } else { - p.print(x) - } - - case *ast.BinaryExpr: - if depth < 1 { - p.internalError("depth < 1:", depth) - depth = 1 - } - p.binaryExpr(x, prec1, cutoff(x, depth), depth) - - case *ast.KeyValueExpr: - p.expr(x.Key) - p.setPos(x.Colon) - p.print(token.COLON, blank) - p.expr(x.Value) - - case *ast.StarExpr: - const prec = token.UnaryPrec - if prec < prec1 { - // parenthesis needed - p.print(token.LPAREN) - p.print(token.MUL) - p.expr(x.X) - p.print(token.RPAREN) - } else { - // no parenthesis needed - p.print(token.MUL) - p.expr(x.X) - } - - case *ast.UnaryExpr: - const prec = token.UnaryPrec - if prec < prec1 { - // parenthesis needed - p.print(token.LPAREN) - p.expr(x) - p.print(token.RPAREN) - } else { - // no parenthesis needed - p.print(x.Op) - if x.Op == token.RANGE { - // TODO(gri) Remove this code if it cannot be reached. - p.print(blank) - } - p.expr1(x.X, prec, depth) - } - - case *ast.BasicLit: - if p.Config.Mode&normalizeNumbers != 0 { - x = normalizedNumber(x) - } - p.print(x) - - case *ast.FuncLit: - p.setPos(x.Type.Pos()) - p.print(token.FUNC) - // See the comment in funcDecl about how the header size is computed. 
- startCol := p.out.Column - len("func") - p.signature(x.Type, nil) - p.funcBody(p.distanceFrom(x.Type.Pos(), startCol), blank, x.Body) - - case *ast.ParenExpr: - if _, hasParens := x.X.(*ast.ParenExpr); hasParens { - // don't print parentheses around an already parenthesized expression - // TODO(gri) consider making this more general and incorporate precedence levels - p.expr0(x.X, depth) - } else { - p.print(token.LPAREN) - p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth - p.setPos(x.Rparen) - p.print(token.RPAREN) - } - - case *ast.SelectorExpr: - p.selectorExpr(x, depth) - - case *ast.TypeAssertExpr: - p.expr1(x.X, token.HighestPrec, depth) - p.print(token.PERIOD) - p.setPos(x.Lparen) - p.print(token.LPAREN) - if x.Type != nil { - p.expr(x.Type) - } else { - p.print(token.TYPE) - } - p.setPos(x.Rparen) - p.print(token.RPAREN) - - case *ast.IndexExpr: - // TODO(gri): should treat[] like parentheses and undo one level of depth - p.expr1(x.X, token.HighestPrec, 1) - p.setPos(x.Lbrack) - p.print(token.LBRACK) - p.expr0(x.Index, depth+1) - p.setPos(x.Rbrack) - p.print(token.RBRACK) - - case *ast.IndexListExpr: - // TODO(gri): as for IndexExpr, should treat [] like parentheses and undo - // one level of depth - p.expr1(x.X, token.HighestPrec, 1) - p.setPos(x.Lbrack) - p.print(token.LBRACK) - p.exprList(x.Lbrack, x.Indices, depth+1, commaTerm, x.Rbrack, false) - p.setPos(x.Rbrack) - p.print(token.RBRACK) - - case *ast.SliceExpr: - // TODO(gri): should treat[] like parentheses and undo one level of depth - p.expr1(x.X, token.HighestPrec, 1) - p.setPos(x.Lbrack) - p.print(token.LBRACK) - indices := []ast.Expr{x.Low, x.High} - if x.Max != nil { - indices = append(indices, x.Max) - } - // determine if we need extra blanks around ':' - var needsBlanks bool - if depth <= 1 { - var indexCount int - var hasBinaries bool - for _, x := range indices { - if x != nil { - indexCount++ - if isBinary(x) { - hasBinaries = true - } - } - } - if indexCount > 1 && hasBinaries { - needsBlanks = true - } - } - for i, x := range indices { - if i > 0 { - if indices[i-1] != nil && needsBlanks { - p.print(blank) - } - p.print(token.COLON) - if x != nil && needsBlanks { - p.print(blank) - } - } - if x != nil { - p.expr0(x, depth+1) - } - } - p.setPos(x.Rbrack) - p.print(token.RBRACK) - - case *ast.CallExpr: - if len(x.Args) > 1 { - depth++ - } - - // Conversions to literal function types or <-chan - // types require parentheses around the type. 
- paren := false - switch t := x.Fun.(type) { - case *ast.FuncType: - paren = true - case *ast.ChanType: - paren = t.Dir == ast.RECV - } - if paren { - p.print(token.LPAREN) - } - if _, ok := x.Fun.(*ast.SelectorExpr); ok { - p.methodExpr(x, depth) - break // handles everything, break out of case - } - p.expr1(x.Fun, token.HighestPrec, depth) - if paren { - p.print(token.RPAREN) - } - p.setPos(x.Lparen) - p.print(token.LPAREN) - args := x.Args - if fid, ok := x.Fun.(*ast.Ident); ok { - if obj, ok := p.pkg.TypesInfo.Uses[fid]; ok { - if ft, ok := obj.(*types.Func); ok { - sig := ft.Type().(*types.Signature) - args = p.matchLiteralArgs(x.Args, sig.Params()) - } - } - } - if x.Ellipsis.IsValid() { - p.exprList(x.Lparen, args, depth, 0, x.Ellipsis, false) - p.setPos(x.Ellipsis) - p.print(token.ELLIPSIS) - if x.Rparen.IsValid() && p.lineFor(x.Ellipsis) < p.lineFor(x.Rparen) { - p.print(token.COMMA, formfeed) - } - } else { - p.exprList(x.Lparen, args, depth, commaTerm, x.Rparen, false) - } - p.setPos(x.Rparen) - p.print(token.RPAREN) - - case *ast.CompositeLit: - // composite literal elements that are composite literals themselves may have the type omitted - if x.Type != nil { - p.expr1(x.Type, token.HighestPrec, depth) - } - p.level++ - p.setPos(x.Lbrace) - p.print(token.LBRACE) - p.exprList(x.Lbrace, x.Elts, 1, commaTerm, x.Rbrace, x.Incomplete) - // do not insert extra line break following a /*-style comment - // before the closing '}' as it might break the code if there - // is no trailing ',' - mode := noExtraLinebreak - // do not insert extra blank following a /*-style comment - // before the closing '}' unless the literal is empty - if len(x.Elts) > 0 { - mode |= noExtraBlank - } - // need the initial indent to print lone comments with - // the proper level of indentation - p.print(indent, unindent, mode) - p.setPos(x.Rbrace) - p.print(token.RBRACE, mode) - p.level-- - - case *ast.Ellipsis: - p.print(token.ELLIPSIS) - if x.Elt != nil { - p.expr(x.Elt) - } - - case *ast.ArrayType: - p.print(token.LBRACK) - if x.Len != nil { - p.expr(x.Len) - } - p.print(token.RBRACK) - p.expr(x.Elt) - - case *ast.StructType: - // p.print(token.STRUCT) - p.fieldList(x.Fields, true, x.Incomplete) - - case *ast.FuncType: - p.print(token.FUNC) - p.signature(x, nil) - - case *ast.InterfaceType: - p.print(token.INTERFACE) - p.fieldList(x.Methods, false, x.Incomplete) - - case *ast.MapType: - p.print(token.MAP, token.LBRACK) - p.expr(x.Key) - p.print(token.RBRACK) - p.expr(x.Value) - - case *ast.ChanType: - switch x.Dir { - case ast.SEND | ast.RECV: - p.print(token.CHAN) - case ast.RECV: - p.print(token.ARROW, token.CHAN) // x.Arrow and x.Pos() are the same - case ast.SEND: - p.print(token.CHAN) - p.setPos(x.Arrow) - p.print(token.ARROW) - } - p.print(blank) - p.expr(x.Value) - - default: - panic("unreachable") - } -} - -// normalizedNumber rewrites base prefixes and exponents -// of numbers to use lower-case letters (0X123 to 0x123 and 1.2E3 to 1.2e3), -// and removes leading 0's from integer imaginary literals (0765i to 765i). -// It leaves hexadecimal digits alone. -// -// normalizedNumber doesn't modify the ast.BasicLit value lit points to. -// If lit is not a number or a number in canonical format already, -// lit is returned as is. Otherwise a new ast.BasicLit is created. 
-func normalizedNumber(lit *ast.BasicLit) *ast.BasicLit { - if lit.Kind != token.INT && lit.Kind != token.FLOAT && lit.Kind != token.IMAG { - return lit // not a number - nothing to do - } - if len(lit.Value) < 2 { - return lit // only one digit (common case) - nothing to do - } - // len(lit.Value) >= 2 - - // We ignore lit.Kind because for lit.Kind == token.IMAG the literal may be an integer - // or floating-point value, decimal or not. Instead, just consider the literal pattern. - x := lit.Value - switch x[:2] { - default: - // 0-prefix octal, decimal int, or float (possibly with 'i' suffix) - if i := strings.LastIndexByte(x, 'E'); i >= 0 { - x = x[:i] + "e" + x[i+1:] - break - } - // remove leading 0's from integer (but not floating-point) imaginary literals - if x[len(x)-1] == 'i' && !strings.ContainsAny(x, ".e") { - x = strings.TrimLeft(x, "0_") - if x == "i" { - x = "0i" - } - } - case "0X": - x = "0x" + x[2:] - // possibly a hexadecimal float - if i := strings.LastIndexByte(x, 'P'); i >= 0 { - x = x[:i] + "p" + x[i+1:] - } - case "0x": - // possibly a hexadecimal float - i := strings.LastIndexByte(x, 'P') - if i == -1 { - return lit // nothing to do - } - x = x[:i] + "p" + x[i+1:] - case "0O": - x = "0o" + x[2:] - case "0o": - return lit // nothing to do - case "0B": - x = "0b" + x[2:] - case "0b": - return lit // nothing to do - } - - return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: lit.Kind, Value: x} -} - -// selectorExpr handles an *ast.SelectorExpr node and reports whether x spans -// multiple lines, and thus was indented. -func (p *printer) selectorExpr(x *ast.SelectorExpr, depth int) (wasIndented bool) { - p.derefPtrArgs(x.X, token.HighestPrec, depth) - p.print(token.PERIOD) - if line := p.lineFor(x.Sel.Pos()); p.pos.IsValid() && p.pos.Line < line { - p.print(indent, newline) - p.setPos(x.Sel.Pos()) - p.print(x.Sel) - p.print(unindent) - return true - } - p.setPos(x.Sel.Pos()) - p.print(x.Sel) - return false -} - -// gosl: methodExpr needs to deal with possible multiple chains of selector exprs -// to determine the actual type and name of the receiver. -// a.b.c() -> sel.X = (a.b) Sel=c -func (p *printer) methodPath(x *ast.SelectorExpr) (recvPath, recvType string, pathType types.Type, err error) { - var baseRecv *ast.Ident // first receiver in path - var paths []string - cur := x - for { - paths = append(paths, cur.Sel.Name) - if sl, ok := cur.X.(*ast.SelectorExpr); ok { // path is itself a selector - cur = sl - continue - } - if id, ok := cur.X.(*ast.Ident); ok { - baseRecv = id - break - } - err = fmt.Errorf("gosl methodPath ERROR: path for method call must be simple list of fields, not %#v:", cur.X) - fmt.Println(err.Error()) - return - } - if p.isPtrArg(baseRecv) { - recvPath = "&(*" + baseRecv.Name + ")" - } else { - recvPath = "&" + baseRecv.Name - } - bt, err := getStructType(p.getIdType(baseRecv)) - if err != nil { - return - } - curt := bt - np := len(paths) - for pi := np - 1; pi >= 0; pi-- { - p := paths[pi] - recvPath += "." 
+ p - f := fieldByName(curt, p) - if f == nil { - err = fmt.Errorf("gosl ERROR: field not found %q in type: %q:", p, curt.String()) - fmt.Println(err.Error()) - return - } - if pi == 0 { - pathType = f.Type() - recvType = getLocalTypeName(f.Type()) - } else { - curt, err = getStructType(f.Type()) - if err != nil { - return - } - } - } - return -} - -func fieldByName(st *types.Struct, name string) *types.Var { - nf := st.NumFields() - for i := range nf { - f := st.Field(i) - if f.Name() == name { - return f - } - } - return nil -} - -func (p *printer) getIdType(id *ast.Ident) types.Type { - if obj, ok := p.pkg.TypesInfo.Uses[id]; ok { - return obj.Type() - } - return nil -} - -func getLocalTypeName(typ types.Type) string { - _, nm := path.Split(typ.String()) - return nm -} - -func getStructType(typ types.Type) (*types.Struct, error) { - typ = typ.Underlying() - if st, ok := typ.(*types.Struct); ok { - return st, nil - } - if ptr, ok := typ.(*types.Pointer); ok { - typ = ptr.Elem().Underlying() - if st, ok := typ.(*types.Struct); ok { - return st, nil - } - } - err := fmt.Errorf("gosl ERROR: type is not a struct and it should be: %q %+t", typ.String(), typ) - fmt.Println(err.Error()) - return nil, err -} - -func (p *printer) methodExpr(x *ast.CallExpr, depth int) { - path := x.Fun.(*ast.SelectorExpr) // we know fun is selector - methName := path.Sel.Name - recvPath := "" - recvType := "" - var err error - pathIsPackage := false - var pathType types.Type - if sl, ok := path.X.(*ast.SelectorExpr); ok { // path is itself a selector - recvPath, recvType, pathType, err = p.methodPath(sl) - if err != nil { - return - } - } else if id, ok := path.X.(*ast.Ident); ok { - recvPath = id.Name - typ := p.getIdType(id) - if typ != nil { - recvType = getLocalTypeName(typ) - if strings.HasPrefix(recvType, "invalid") { - pathIsPackage = true - recvType = id.Name // is a package path - } else { - pathType = typ - } - } else { - pathIsPackage = true - recvType = id.Name // is a package path - } - } else { - err := fmt.Errorf("gosl methodExpr ERROR: path expression for method call must be simple list of fields, not %#v:", path.X) - fmt.Println(err.Error()) - return - } - if pathIsPackage { - p.print(recvType + "." + methName) - p.setPos(x.Lparen) - p.print(token.LPAREN) - } else { - p.print(recvType + "_" + methName) - p.setPos(x.Lparen) - p.print(token.LPAREN) - p.print(recvPath) - if len(x.Args) > 0 { - p.print(token.COMMA, blank) - } - } - args := x.Args - if pathType != nil { - meth, _, _ := types.LookupFieldOrMethod(pathType, true, p.pkg.Types, methName) - if meth != nil { - if ft, ok := meth.(*types.Func); ok { - sig := ft.Type().(*types.Signature) - args = p.matchLiteralArgs(x.Args, sig.Params()) - } - } - } - if x.Ellipsis.IsValid() { - p.exprList(x.Lparen, args, depth, 0, x.Ellipsis, false) - p.setPos(x.Ellipsis) - p.print(token.ELLIPSIS) - if x.Rparen.IsValid() && p.lineFor(x.Ellipsis) < p.lineFor(x.Rparen) { - p.print(token.COMMA, formfeed) - } - } else { - p.exprList(x.Lparen, args, depth, commaTerm, x.Rparen, false) - } - p.setPos(x.Rparen) - p.print(token.RPAREN) - -} - -func (p *printer) expr0(x ast.Expr, depth int) { - p.expr1(x, token.LowestPrec, depth) -} - -func (p *printer) expr(x ast.Expr) { - const depth = 1 - p.expr1(x, token.LowestPrec, depth) -} - -// ---------------------------------------------------------------------------- -// Statements - -// Print the statement list indented, but without a newline after the last statement. 
-// Extra line breaks between statements in the source are respected but at most one -// empty line is printed between statements. -func (p *printer) stmtList(list []ast.Stmt, nindent int, nextIsRBrace bool) { - if nindent > 0 { - p.print(indent) - } - var line int - i := 0 - for _, s := range list { - // ignore empty statements (was issue 3466) - if _, isEmpty := s.(*ast.EmptyStmt); !isEmpty { - // nindent == 0 only for lists of switch/select case clauses; - // in those cases each clause is a new section - if len(p.output) > 0 { - // only print line break if we are not at the beginning of the output - // (i.e., we are not printing only a partial program) - p.linebreak(p.lineFor(s.Pos()), 1, ignore, i == 0 || nindent == 0 || p.linesFrom(line) > 0) - } - p.recordLine(&line) - p.stmt(s, nextIsRBrace && i == len(list)-1, false) - // labeled statements put labels on a separate line, but here - // we only care about the start line of the actual statement - // without label - correct line for each label - for t := s; ; { - lt, _ := t.(*ast.LabeledStmt) - if lt == nil { - break - } - line++ - t = lt.Stmt - } - i++ - } - } - if nindent > 0 { - p.print(unindent) - } -} - -// block prints an *ast.BlockStmt; it always spans at least two lines. -func (p *printer) block(b *ast.BlockStmt, nindent int) { - p.setPos(b.Lbrace) - p.print(token.LBRACE) - p.stmtList(b.List, nindent, true) - p.linebreak(p.lineFor(b.Rbrace), 1, ignore, true) - p.setPos(b.Rbrace) - p.print(token.RBRACE) -} - -func isTypeName(x ast.Expr) bool { - switch t := x.(type) { - case *ast.Ident: - return true - case *ast.SelectorExpr: - return isTypeName(t.X) - } - return false -} - -func stripParens(x ast.Expr) ast.Expr { - if px, strip := x.(*ast.ParenExpr); strip { - // parentheses must not be stripped if there are any - // unparenthesized composite literals starting with - // a type name - ast.Inspect(px.X, func(node ast.Node) bool { - switch x := node.(type) { - case *ast.ParenExpr: - // parentheses protect enclosed composite literals - return false - case *ast.CompositeLit: - if isTypeName(x.Type) { - strip = false // do not strip parentheses - } - return false - } - // in all other cases, keep inspecting - return true - }) - if strip { - return stripParens(px.X) - } - } - return x -} - -func stripParensAlways(x ast.Expr) ast.Expr { - if x, ok := x.(*ast.ParenExpr); ok { - return stripParensAlways(x.X) - } - return x -} - -func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, post ast.Stmt) { - p.print(blank) - p.print(token.LPAREN) - needsBlank := false - if init == nil && post == nil { - // no semicolons required - if expr != nil { - p.expr(stripParens(expr)) - needsBlank = true - } - } else { - // all semicolons required - // (they are not separators, print them explicitly) - if init != nil { - p.stmt(init, false, false) // false = generate own semi - p.print(blank) - } else { - p.print(token.SEMICOLON, blank) - } - if expr != nil { - p.expr(stripParens(expr)) - needsBlank = true - } - if isForStmt { - p.print(token.SEMICOLON, blank) - needsBlank = false - if post != nil { - p.stmt(post, false, true) // nosemi - needsBlank = true - } - } - } - p.print(token.RPAREN) - if needsBlank { - p.print(blank) - } -} - -// indentList reports whether an expression list would look better if it -// were indented wholesale (starting with the very first element, rather -// than starting at the first line break). 
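stripParens and controlClause above keep the upstream behavior of removing redundant parentheses around a condition (unless an unparenthesized composite literal starting with a type name would become ambiguous) before this printer re-emits its own surrounding "( )". The effect is easiest to see with stock go/format, from which this logic is copied:

package main

import (
	"fmt"
	"go/format"
)

func main() {
	src := []byte("package p\n\nfunc f(x bool) {\n\tif (x) {\n\t\treturn\n\t}\n}\n")
	out, err := format.Source(src)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// the condition comes out as "if x {": the source parentheses are stripped
}
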
-func (p *printer) indentList(list []ast.Expr) bool { - // Heuristic: indentList reports whether there are more than one multi- - // line element in the list, or if there is any element that is not - // starting on the same line as the previous one ends. - if len(list) >= 2 { - var b = p.lineFor(list[0].Pos()) - var e = p.lineFor(list[len(list)-1].End()) - if 0 < b && b < e { - // list spans multiple lines - n := 0 // multi-line element count - line := b - for _, x := range list { - xb := p.lineFor(x.Pos()) - xe := p.lineFor(x.End()) - if line < xb { - // x is not starting on the same - // line as the previous one ended - return true - } - if xb < xe { - // x is a multi-line element - n++ - } - line = xe - } - return n > 1 - } - } - return false -} - -func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, nosemi bool) { - p.setPos(stmt.Pos()) - - switch s := stmt.(type) { - case *ast.BadStmt: - p.print("BadStmt") - - case *ast.DeclStmt: - p.decl(s.Decl) - - case *ast.EmptyStmt: - // nothing to do - - case *ast.LabeledStmt: - // a "correcting" unindent immediately following a line break - // is applied before the line break if there is no comment - // between (see writeWhitespace) - p.print(unindent) - p.expr(s.Label) - p.setPos(s.Colon) - p.print(token.COLON, indent) - if e, isEmpty := s.Stmt.(*ast.EmptyStmt); isEmpty { - if !nextIsRBrace { - p.print(newline) - p.setPos(e.Pos()) - p.print(token.SEMICOLON) - break - } - } else { - p.linebreak(p.lineFor(s.Stmt.Pos()), 1, ignore, true) - } - p.stmt(s.Stmt, nextIsRBrace, nosemi) - - case *ast.ExprStmt: - const depth = 1 - p.expr0(s.X, depth) - if !nosemi { - p.print(token.SEMICOLON) - } - - case *ast.SendStmt: - const depth = 1 - p.expr0(s.Chan, depth) - p.print(blank) - p.setPos(s.Arrow) - p.print(token.ARROW, blank) - p.expr0(s.Value, depth) - - case *ast.IncDecStmt: - const depth = 1 - p.expr0(s.X, depth+1) - p.setPos(s.TokPos) - p.print(s.Tok) - if !nosemi { - p.print(token.SEMICOLON) - } - - case *ast.AssignStmt: - var depth = 1 - if len(s.Lhs) > 1 && len(s.Rhs) > 1 { - depth++ - } - if s.Tok == token.DEFINE { - p.print("var", blank) // we don't know if it is var or let.. - } - p.exprList(s.Pos(), s.Lhs, depth, 0, s.TokPos, false) - p.print(blank) - p.setPos(s.TokPos) - switch s.Tok { - case token.DEFINE: - p.print(token.ASSIGN, blank) - case token.AND_NOT_ASSIGN: - p.print(token.AND_ASSIGN, blank, "~") - default: - p.print(s.Tok, blank) - } - if p.matchAssignType(s.Lhs, s.Rhs) { - } else { - p.exprList(s.TokPos, s.Rhs, depth, 0, token.NoPos, false) - } - if !nosemi { - p.print(token.SEMICOLON) - } - - case *ast.GoStmt: - p.print(token.GO, blank) - p.expr(s.Call) - - case *ast.DeferStmt: - p.print(token.DEFER, blank) - p.expr(s.Call) - - case *ast.ReturnStmt: - p.print(token.RETURN) - if s.Results != nil { - p.print(blank) - if !p.matchLiteralType(s.Results[0], p.curReturnType) { - // Use indentList heuristic to make corner cases look - // better (issue 1207). A more systematic approach would - // always indent, but this would cause significant - // reformatting of the code base and not necessarily - // lead to more nicely formatted code in general. - if p.indentList(s.Results) { - p.print(indent) - // Use NoPos so that a newline never goes before - // the results (see issue #32854). 
- p.exprList(token.NoPos, s.Results, 1, noIndent, token.NoPos, false) - p.print(unindent) - } else { - p.exprList(token.NoPos, s.Results, 1, 0, token.NoPos, false) - } - } - } - if !nosemi { - p.print(token.SEMICOLON) - } - - case *ast.BranchStmt: - p.print(s.Tok) - if s.Label != nil { - p.print(blank) - p.expr(s.Label) - } - p.print(token.SEMICOLON) - - case *ast.BlockStmt: - p.block(s, 1) - - case *ast.IfStmt: - p.print(token.IF) - p.controlClause(false, s.Init, s.Cond, nil) - p.block(s.Body, 1) - if s.Else != nil { - p.print(blank, token.ELSE, blank) - switch s.Else.(type) { - case *ast.BlockStmt, *ast.IfStmt: - p.stmt(s.Else, nextIsRBrace, false) - default: - // This can only happen with an incorrectly - // constructed AST. Permit it but print so - // that it can be parsed without errors. - p.print(token.LBRACE, indent, formfeed) - p.stmt(s.Else, true, false) - p.print(unindent, formfeed, token.RBRACE) - } - } - - case *ast.CaseClause: - if s.List != nil { - p.print(token.CASE, blank) - p.exprList(s.Pos(), s.List, 1, 0, s.Colon, false) - } else { - p.print(token.DEFAULT) - } - p.setPos(s.Colon) - p.print(token.COLON, blank, token.LBRACE) // Go implies new context, C doesn't - p.stmtList(s.Body, 1, nextIsRBrace) - p.print(formfeed, token.RBRACE) - - case *ast.SwitchStmt: - p.print(token.SWITCH) - p.controlClause(false, s.Init, s.Tag, nil) - p.block(s.Body, 0) - - case *ast.TypeSwitchStmt: - p.print(token.SWITCH) - if s.Init != nil { - p.print(blank) - p.stmt(s.Init, false, false) - p.print(token.SEMICOLON) - } - p.print(blank) - p.stmt(s.Assign, false, false) - p.print(blank) - p.block(s.Body, 0) - - case *ast.CommClause: - if s.Comm != nil { - p.print(token.CASE, blank) - p.stmt(s.Comm, false, false) - } else { - p.print(token.DEFAULT) - } - p.setPos(s.Colon) - p.print(token.COLON) - p.stmtList(s.Body, 1, nextIsRBrace) - - case *ast.SelectStmt: - p.print(token.SELECT, blank) - body := s.Body - if len(body.List) == 0 && !p.commentBefore(p.posFor(body.Rbrace)) { - // print empty select statement w/o comments on one line - p.setPos(body.Lbrace) - p.print(token.LBRACE) - p.setPos(body.Rbrace) - p.print(token.RBRACE) - } else { - p.block(body, 0) - } - - case *ast.ForStmt: - p.print(token.FOR) - p.controlClause(true, s.Init, s.Cond, s.Post) - p.block(s.Body, 1) - - case *ast.RangeStmt: - p.print(token.FOR, blank) - if s.Key != nil { - p.expr(s.Key) - if s.Value != nil { - // use position of value following the comma as - // comma position for correct comment placement - p.setPos(s.Value.Pos()) - p.print(token.COMMA, blank) - p.expr(s.Value) - } - p.print(blank) - p.setPos(s.TokPos) - p.print(s.Tok, blank) - } - p.print(token.RANGE, blank) - p.expr(stripParens(s.X)) - p.print(blank) - p.block(s.Body, 1) - - default: - panic("unreachable") - } -} - -// ---------------------------------------------------------------------------- -// Declarations - -// The keepTypeColumn function determines if the type column of a series of -// consecutive const or var declarations must be kept, or if initialization -// values (V) can be placed in the type column (T) instead. The i'th entry -// in the result slice is true if the type column in spec[i] must be kept. -// -// For example, the declaration: -// -// const ( -// foobar int = 42 // comment -// x = 7 // comment -// foo -// bar = 991 -// ) -// -// leads to the type/values matrix below. 
A run of value columns (V) can -// be moved into the type column if there is no type for any of the values -// in that column (we only move entire columns so that they align properly). -// -// matrix formatted result -// matrix -// T V -> T V -> true there is a T and so the type -// - V - V true column must be kept -// - - - - false -// - V V - false V is moved into T column -func keepTypeColumn(specs []ast.Spec) []bool { - m := make([]bool, len(specs)) - - populate := func(i, j int, keepType bool) { - if keepType { - for ; i < j; i++ { - m[i] = true - } - } - } - - i0 := -1 // if i0 >= 0 we are in a run and i0 is the start of the run - var keepType bool - for i, s := range specs { - t := s.(*ast.ValueSpec) - if t.Values != nil { - if i0 < 0 { - // start of a run of ValueSpecs with non-nil Values - i0 = i - keepType = false - } - } else { - if i0 >= 0 { - // end of a run - populate(i0, i, keepType) - i0 = -1 - } - } - if t.Type != nil { - keepType = true - } - } - if i0 >= 0 { - // end of a run - populate(i0, len(specs), keepType) - } - - return m -} - -func (p *printer) valueSpec(s *ast.ValueSpec, keepType bool, tok token.Token, firstSpec *ast.ValueSpec, isIota bool, idx int) { - p.setComment(s.Doc) - - // gosl: key to use Pos() as first arg to trigger emitting of comments! - switch tok { - case token.CONST: - p.setPos(s.Pos()) - p.print(tok, blank) - case token.TYPE: - p.setPos(s.Pos()) - p.print("alias", blank) - } - p.print(vtab) - - extraTabs := 3 - p.identList(s.Names, false) // always present - if isIota { - if s.Type != nil { - p.print(token.COLON, blank) - p.expr(s.Type) - } else if firstSpec.Type != nil { - p.print(token.COLON, blank) - p.expr(firstSpec.Type) - } - p.print(vtab, token.ASSIGN, blank) - p.print(fmt.Sprintf("%d", idx)) - } else if s.Type != nil || keepType { - p.print(token.COLON, blank) - p.expr(s.Type) - extraTabs-- - } else if tok == token.CONST && firstSpec.Type != nil { - p.expr(firstSpec.Type) - extraTabs-- - } - - if !(isIota && s == firstSpec) && s.Values != nil { - p.print(vtab, token.ASSIGN, blank) - p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false) - extraTabs-- - } - p.print(token.SEMICOLON) - if s.Comment != nil { - for ; extraTabs > 0; extraTabs-- { - p.print(vtab) - } - p.setComment(s.Comment) - } -} - -func sanitizeImportPath(lit *ast.BasicLit) *ast.BasicLit { - // Note: An unmodified AST generated by go/parser will already - // contain a backward- or double-quoted path string that does - // not contain any invalid characters, and most of the work - // here is not needed. However, a modified or generated AST - // may possibly contain non-canonical paths. Do the work in - // all cases since it's not too hard and not speed-critical. - - // if we don't have a proper string, be conservative and return whatever we have - if lit.Kind != token.STRING { - return lit - } - s, err := strconv.Unquote(lit.Value) - if err != nil { - return lit - } - - // if the string is an invalid path, return whatever we have - // - // spec: "Implementation restriction: A compiler may restrict - // ImportPaths to non-empty strings using only characters belonging - // to Unicode's L, M, N, P, and S general categories (the Graphic - // characters without spaces) and may also exclude the characters - // !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character - // U+FFFD." 
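sanitizeImportPath relies on a strconv round trip to canonicalize the path literal; a small demonstration with an arbitrary backquoted path (the path itself is only an example):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	lit := "`golang.org/x/tools`" // a backquoted import path literal from the source
	s, err := strconv.Unquote(lit)
	if err != nil {
		fmt.Println("not a valid string literal; keep as is:", lit)
		return
	}
	// re-quote to the canonical double-quoted form
	fmt.Println(strconv.Quote(s)) // "golang.org/x/tools"
}
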
- if s == "" { - return lit - } - const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" - for _, r := range s { - if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) { - return lit - } - } - - // otherwise, return the double-quoted path - s = strconv.Quote(s) - if s == lit.Value { - return lit // nothing wrong with lit - } - return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: token.STRING, Value: s} -} - -// The parameter n is the number of specs in the group. If doIndent is set, -// multi-line identifier lists in the spec are indented when the first -// linebreak is encountered. -func (p *printer) spec(spec ast.Spec, n int, doIndent bool, tok token.Token) { - switch s := spec.(type) { - case *ast.ImportSpec: - p.setComment(s.Doc) - if s.Name != nil { - p.expr(s.Name) - p.print(blank) - } - p.expr(sanitizeImportPath(s.Path)) - p.setComment(s.Comment) - p.setPos(s.EndPos) - - case *ast.ValueSpec: - if n != 1 { - p.internalError("expected n = 1; got", n) - } - p.setComment(s.Doc) - - if len(s.Names) > 1 { - nnm := len(s.Names) - for ni, nm := range s.Names { - p.print(tok, blank) - p.print(nm.Name) - if s.Type != nil { - p.print(token.COLON, blank) - p.expr(s.Type) - } - if s.Values != nil { - p.print(blank, token.ASSIGN, blank) - p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false) - } - p.print(token.SEMICOLON) - if ni < nnm-1 { - p.print(formfeed) - } - } - } else { - p.print(tok, blank) - p.identList(s.Names, doIndent) // always present - if s.Type != nil { - p.print(token.COLON, blank) - p.expr(s.Type) - } - if s.Values != nil { - p.print(blank, token.ASSIGN, blank) - p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false) - } - p.print(token.SEMICOLON) - p.setComment(s.Comment) - } - - case *ast.TypeSpec: - p.setComment(s.Doc) - st, isStruct := s.Type.(*ast.StructType) - if isStruct { - p.setPos(st.Pos()) - p.print(token.STRUCT, blank) - } else { - p.print("alias", blank) - } - p.expr(s.Name) - if !isStruct { - p.print(blank, token.ASSIGN, blank) - } - if s.TypeParams != nil { - p.parameters(s.TypeParams, typeTParam) - } - // if n == 1 { - // p.print(blank) - // } else { - // p.print(vtab) - // } - if s.Assign.IsValid() { - p.print(token.ASSIGN, blank) - } - p.expr(s.Type) - if !isStruct { - p.print(token.SEMICOLON) - } - p.setComment(s.Comment) - - default: - panic("unreachable") - } -} - -func (p *printer) genDecl(d *ast.GenDecl) { - p.setComment(d.Doc) - // note: critical to print here to trigger comment generation in right place - p.setPos(d.Pos()) - if d.Tok == token.IMPORT { - return - } - // p.print(d.Pos(), d.Tok, blank) - p.print(ignore) // don't print - - if d.Lparen.IsValid() || len(d.Specs) != 1 { - // group of parenthesized declarations - // p.setPos(d.Lparen) - // p.print(token.LPAREN) - if n := len(d.Specs); n > 0 { - // p.print(indent, formfeed) - if n > 1 && (d.Tok == token.CONST || d.Tok == token.VAR) { - // two or more grouped const/var declarations: - // determine if the type column must be kept - keepType := keepTypeColumn(d.Specs) - firstSpec := d.Specs[0].(*ast.ValueSpec) - isIota := false - if d.Tok == token.CONST { - if id, isId := firstSpec.Values[0].(*ast.Ident); isId { - if id.Name == "iota" { - isIota = true - } - } - } - var line int - for i, s := range d.Specs { - if i > 0 { - p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0) - } - p.recordLine(&line) - p.valueSpec(s.(*ast.ValueSpec), keepType[i], d.Tok, firstSpec, isIota, i) - } - } else { - var line int - for i, s := range d.Specs { - 
if i > 0 { - p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0) - } - p.recordLine(&line) - p.spec(s, n, false, d.Tok) - } - } - // p.print(unindent, formfeed) - } - // p.setPos(d.Rparen) - // p.print(token.RPAREN) - } else if len(d.Specs) > 0 { - // single declaration - p.spec(d.Specs[0], 1, true, d.Tok) - } -} - -// sizeCounter is an io.Writer which counts the number of bytes written, -// as well as whether a newline character was seen. -type sizeCounter struct { - hasNewline bool - size int -} - -func (c *sizeCounter) Write(p []byte) (int, error) { - if !c.hasNewline { - for _, b := range p { - if b == '\n' || b == '\f' { - c.hasNewline = true - break - } - } - } - c.size += len(p) - return len(p), nil -} - -// nodeSize determines the size of n in chars after formatting. -// The result is <= maxSize if the node fits on one line with at -// most maxSize chars and the formatted output doesn't contain -// any control chars. Otherwise, the result is > maxSize. -func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) { - // nodeSize invokes the printer, which may invoke nodeSize - // recursively. For deep composite literal nests, this can - // lead to an exponential algorithm. Remember previous - // results to prune the recursion (was issue 1628). - if size, found := p.nodeSizes[n]; found { - return size - } - - size = maxSize + 1 // assume n doesn't fit - p.nodeSizes[n] = size - - // nodeSize computation must be independent of particular - // style so that we always get the same decision; print - // in RawFormat - cfg := Config{Mode: RawFormat} - var counter sizeCounter - if err := cfg.fprint(&counter, p.pkg, n, p.nodeSizes); err != nil { - return - } - if counter.size <= maxSize && !counter.hasNewline { - // n fits in a single line - size = counter.size - p.nodeSizes[n] = size - } - return -} - -// numLines returns the number of lines spanned by node n in the original source. -func (p *printer) numLines(n ast.Node) int { - if from := n.Pos(); from.IsValid() { - if to := n.End(); to.IsValid() { - return p.lineFor(to) - p.lineFor(from) + 1 - } - } - return infinity -} - -// bodySize is like nodeSize but it is specialized for *ast.BlockStmt's. -func (p *printer) bodySize(b *ast.BlockStmt, maxSize int) int { - pos1 := b.Pos() - pos2 := b.Rbrace - if pos1.IsValid() && pos2.IsValid() && p.lineFor(pos1) != p.lineFor(pos2) { - // opening and closing brace are on different lines - don't make it a one-liner - return maxSize + 1 - } - if len(b.List) > 5 { - // too many statements - don't make it a one-liner - return maxSize + 1 - } - // otherwise, estimate body size - bodySize := p.commentSizeBefore(p.posFor(pos2)) - for i, s := range b.List { - if bodySize > maxSize { - break // no need to continue - } - if i > 0 { - bodySize += 2 // space for a semicolon and blank - } - bodySize += p.nodeSize(s, maxSize) - } - return bodySize -} - -// funcBody prints a function body following a function header of given headerSize. -// If the header's and block's size are "small enough" and the block is "simple enough", -// the block is printed on the current line, without line breaks, spaced from the header -// by sep. Otherwise the block's opening "{" is printed on the current line, followed by -// lines for the block's statements and its closing "}". 
-func (p *printer) funcBody(headerSize int, sep whiteSpace, b *ast.BlockStmt) { - if b == nil { - return - } - - // save/restore composite literal nesting level - defer func(level int) { - p.level = level - }(p.level) - p.level = 0 - - const maxSize = 100 - if headerSize+p.bodySize(b, maxSize) <= maxSize { - p.print(sep) - p.setPos(b.Lbrace) - p.print(token.LBRACE) - if len(b.List) > 0 { - p.print(blank) - for i, s := range b.List { - if i > 0 { - p.print(token.SEMICOLON, blank) - } - p.stmt(s, i == len(b.List)-1, false) - } - p.print(blank) - } - p.print(noExtraLinebreak) - p.setPos(b.Rbrace) - p.print(token.RBRACE, noExtraLinebreak) - return - } - - if sep != ignore { - p.print(blank) // always use blank - } - p.block(b, 1) -} - -// distanceFrom returns the column difference between p.out (the current output -// position) and startOutCol. If the start position is on a different line from -// the current position (or either is unknown), the result is infinity. -func (p *printer) distanceFrom(startPos token.Pos, startOutCol int) int { - if startPos.IsValid() && p.pos.IsValid() && p.posFor(startPos).Line == p.pos.Line { - return p.out.Column - startOutCol - } - return infinity -} - -func (p *printer) methRecvType(typ ast.Expr) string { - switch x := typ.(type) { - case *ast.StarExpr: - return p.methRecvType(x.X) - case *ast.Ident: - return x.Name - default: - return fmt.Sprintf("recv type unknown: %+T", x) - } - return "" -} - -func (p *printer) funcDecl(d *ast.FuncDecl) { - p.setComment(d.Doc) - p.setPos(d.Pos()) - // We have to save startCol only after emitting FUNC; otherwise it can be on a - // different line (all whitespace preceding the FUNC is emitted only when the - // FUNC is emitted). - startCol := p.out.Column - len("func ") - if d.Recv != nil { - for ex := range p.ExcludeFunctions { - if d.Name.Name == ex { - return - } - } - p.print("fn", blank) - if d.Recv.List[0].Names != nil { - p.curMethRecv = d.Recv.List[0] - if p.printMethRecv() { - p.curPtrArgs = []*ast.Ident{p.curMethRecv.Names[0]} - } - // fmt.Printf("cur func recv: %v\n", p.curMethRecv) - } - // p.parameters(d.Recv, funcParam) // method: print receiver - // p.print(blank) - } else { - p.print("fn", blank) - } - p.expr(d.Name) - p.signature(d.Type, d.Recv) - p.funcBody(p.distanceFrom(d.Pos(), startCol), vtab, d.Body) - p.curPtrArgs = nil - p.curMethRecv = nil -} - -func (p *printer) decl(decl ast.Decl) { - switch d := decl.(type) { - case *ast.BadDecl: - p.setPos(d.Pos()) - p.print("BadDecl") - case *ast.GenDecl: - p.genDecl(d) - case *ast.FuncDecl: - p.funcDecl(d) - default: - panic("unreachable") - } -} - -// ---------------------------------------------------------------------------- -// Files - -func declToken(decl ast.Decl) (tok token.Token) { - tok = token.ILLEGAL - switch d := decl.(type) { - case *ast.GenDecl: - tok = d.Tok - case *ast.FuncDecl: - tok = token.FUNC - } - return -} - -func (p *printer) declList(list []ast.Decl) { - tok := token.ILLEGAL - for _, d := range list { - prev := tok - tok = declToken(d) - // If the declaration token changed (e.g., from CONST to TYPE) - // or the next declaration has documentation associated with it, - // print an empty line between top-level declarations. - // (because p.linebreak is called with the position of d, which - // is past any documentation, the minimum requirement is satisfied - // even w/o the extra getDoc(d) nil-check - leave it in case the - // linebreak logic improves - there's already a TODO). 
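bodySize and funcBody above keep the upstream one-liner rule: a body stays on a single line only if the header plus body fit within maxSize (100) columns and the body has at most five statements. A hedged check against stock go/format, from which this logic derives:

package main

import (
	"fmt"
	"go/format"
)

func main() {
	src := []byte("package p\n\n" +
		"func ok() bool { return true }\n\n" +
		"func busy() { a(); b(); c(); d(); e(); f() }\n")
	out, err := format.Source(src)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// ok stays on one line; busy has six statements, one more than the limit
	// checked in bodySize, so it should come out expanded onto separate lines.
}
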
- if len(p.output) > 0 { - // only print line break if we are not at the beginning of the output - // (i.e., we are not printing only a partial program) - min := 1 - if prev != tok || getDoc(d) != nil { - min = 2 - } - // start a new section if the next declaration is a function - // that spans multiple lines (see also issue #19544) - p.linebreak(p.lineFor(d.Pos()), min, ignore, tok == token.FUNC && p.numLines(d) > 1) - } - p.decl(d) - } -} - -func (p *printer) file(src *ast.File) { - p.setComment(src.Doc) - p.setPos(src.Pos()) - p.print(token.PACKAGE, blank) - p.expr(src.Name) - p.declList(src.Decls) - p.print(newline) -} diff --git a/gpu/gosl/slprint/printer.go b/gpu/gosl/slprint/printer.go deleted file mode 100644 index 0fa383be44..0000000000 --- a/gpu/gosl/slprint/printer.go +++ /dev/null @@ -1,1444 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slprint - -import ( - "fmt" - "go/ast" - "go/build/constraint" - "go/token" - "io" - "os" - "strings" - "sync" - "text/tabwriter" - "unicode" - - "golang.org/x/tools/go/packages" -) - -const ( - maxNewlines = 2 // max. number of newlines between source text - debug = false // enable for debugging - infinity = 1 << 30 -) - -type whiteSpace byte - -const ( - ignore = whiteSpace(0) - blank = whiteSpace(' ') - vtab = whiteSpace('\v') - newline = whiteSpace('\n') - formfeed = whiteSpace('\f') - indent = whiteSpace('>') - unindent = whiteSpace('<') -) - -// A pmode value represents the current printer mode. -type pmode int - -const ( - noExtraBlank pmode = 1 << iota // disables extra blank after /*-style comment - noExtraLinebreak // disables extra line break after /*-style comment -) - -type commentInfo struct { - cindex int // current comment index - comment *ast.CommentGroup // = printer.comments[cindex]; or nil - commentOffset int // = printer.posFor(printer.comments[cindex].List[0].Pos()).Offset; or infinity - commentNewline bool // true if the comment group contains newlines -} - -type printer struct { - // Configuration (does not change after initialization) - Config - fset *token.FileSet - pkg *packages.Package // gosl: extra - - // Current state - output []byte // raw printer result - indent int // current indentation - level int // level == 0: outside composite literal; level > 0: inside composite literal - mode pmode // current printer mode - endAlignment bool // if set, terminate alignment immediately - impliedSemi bool // if set, a linebreak implies a semicolon - lastTok token.Token // last token printed (token.ILLEGAL if it's whitespace) - prevOpen token.Token // previous non-brace "open" token (, [, or token.ILLEGAL - wsbuf []whiteSpace // delayed white space - goBuild []int // start index of all //go:build comments in output - plusBuild []int // start index of all // +build comments in output - - // Positions - // The out position differs from the pos position when the result - // formatting differs from the source formatting (in the amount of - // white space). If there's a difference and SourcePos is set in - // ConfigMode, //line directives are used in the output to restore - // original source positions for a reader. 
- pos token.Position // current position in AST (source) space - out token.Position // current position in output space - last token.Position // value of pos after calling writeString - linePtr *int // if set, record out.Line for the next token in *linePtr - sourcePosErr error // if non-nil, the first error emitting a //line directive - - // The list of all source comments, in order of appearance. - comments []*ast.CommentGroup // may be nil - useNodeComments bool // if not set, ignore lead and line comments of nodes - - // Information about p.comments[p.cindex]; set up by nextComment. - commentInfo - - // Cache of already computed node sizes. - nodeSizes map[ast.Node]int - - // Cache of most recently computed line position. - cachedPos token.Pos - cachedLine int // line corresponding to cachedPos - - // current arguments to function that are pointers and thus need dereferencing - // when accessing fields - curPtrArgs []*ast.Ident - curMethRecv *ast.Field // current method receiver, also included in curPtrArgs if ptr - curReturnType *ast.Ident -} - -func (p *printer) internalError(msg ...any) { - if debug { - fmt.Print(p.pos.String() + ": ") - fmt.Println(msg...) - panic("go/printer") - } -} - -// commentsHaveNewline reports whether a list of comments belonging to -// an *ast.CommentGroup contains newlines. Because the position information -// may only be partially correct, we also have to read the comment text. -func (p *printer) commentsHaveNewline(list []*ast.Comment) bool { - // len(list) > 0 - line := p.lineFor(list[0].Pos()) - for i, c := range list { - if i > 0 && p.lineFor(list[i].Pos()) != line { - // not all comments on the same line - return true - } - if t := c.Text; len(t) >= 2 && (t[1] == '/' || strings.Contains(t, "\n")) { - return true - } - } - _ = line - return false -} - -func (p *printer) nextComment() { - for p.cindex < len(p.comments) { - c := p.comments[p.cindex] - p.cindex++ - if list := c.List; len(list) > 0 { - p.comment = c - p.commentOffset = p.posFor(list[0].Pos()).Offset - p.commentNewline = p.commentsHaveNewline(list) - return - } - // we should not reach here (correct ASTs don't have empty - // ast.CommentGroup nodes), but be conservative and try again - } - // no more comments - p.commentOffset = infinity -} - -// commentBefore reports whether the current comment group occurs -// before the next position in the source code and printing it does -// not introduce implicit semicolons. -func (p *printer) commentBefore(next token.Position) bool { - return p.commentOffset < next.Offset && (!p.impliedSemi || !p.commentNewline) -} - -// commentSizeBefore returns the estimated size of the -// comments on the same line before the next position. -func (p *printer) commentSizeBefore(next token.Position) int { - // save/restore current p.commentInfo (p.nextComment() modifies it) - defer func(info commentInfo) { - p.commentInfo = info - }(p.commentInfo) - - size := 0 - for p.commentBefore(next) { - for _, c := range p.comment.List { - size += len(c.Text) - } - p.nextComment() - } - return size -} - -// recordLine records the output line number for the next non-whitespace -// token in *linePtr. It is used to compute an accurate line number for a -// formatted construct, independent of pending (not yet emitted) whitespace -// or comments. 
-func (p *printer) recordLine(linePtr *int) { - p.linePtr = linePtr -} - -// linesFrom returns the number of output lines between the current -// output line and the line argument, ignoring any pending (not yet -// emitted) whitespace or comments. It is used to compute an accurate -// size (in number of lines) for a formatted construct. -func (p *printer) linesFrom(line int) int { - return p.out.Line - line -} - -func (p *printer) posFor(pos token.Pos) token.Position { - // not used frequently enough to cache entire token.Position - return p.fset.PositionFor(pos, false /* absolute position */) -} - -func (p *printer) lineFor(pos token.Pos) int { - if pos != p.cachedPos { - p.cachedPos = pos - p.cachedLine = p.fset.PositionFor(pos, false /* absolute position */).Line - } - return p.cachedLine -} - -// writeLineDirective writes a //line directive if necessary. -func (p *printer) writeLineDirective(pos token.Position) { - if pos.IsValid() && (p.out.Line != pos.Line || p.out.Filename != pos.Filename) { - if strings.ContainsAny(pos.Filename, "\r\n") { - if p.sourcePosErr == nil { - p.sourcePosErr = fmt.Errorf("go/printer: source filename contains unexpected newline character: %q", pos.Filename) - } - return - } - - p.output = append(p.output, tabwriter.Escape) // protect '\n' in //line from tabwriter interpretation - p.output = append(p.output, fmt.Sprintf("//line %s:%d\n", pos.Filename, pos.Line)...) - p.output = append(p.output, tabwriter.Escape) - // p.out must match the //line directive - p.out.Filename = pos.Filename - p.out.Line = pos.Line - } -} - -// writeIndent writes indentation. -func (p *printer) writeIndent() { - // use "hard" htabs - indentation columns - // must not be discarded by the tabwriter - n := p.Config.Indent + p.indent // include base indentation - for i := 0; i < n; i++ { - p.output = append(p.output, '\t') - } - - // update positions - p.pos.Offset += n - p.pos.Column += n - p.out.Column += n -} - -// writeByte writes ch n times to p.output and updates p.pos. -// Only used to write formatting (white space) characters. -func (p *printer) writeByte(ch byte, n int) { - if p.endAlignment { - // Ignore any alignment control character; - // and at the end of the line, break with - // a formfeed to indicate termination of - // existing columns. - switch ch { - case '\t', '\v': - ch = ' ' - case '\n', '\f': - ch = '\f' - p.endAlignment = false - } - } - - if p.out.Column == 1 { - // no need to write line directives before white space - p.writeIndent() - } - - for i := 0; i < n; i++ { - p.output = append(p.output, ch) - } - - // update positions - p.pos.Offset += n - if ch == '\n' || ch == '\f' { - p.pos.Line += n - p.out.Line += n - p.pos.Column = 1 - p.out.Column = 1 - return - } - p.pos.Column += n - p.out.Column += n -} - -// writeString writes the string s to p.output and updates p.pos, p.out, -// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters -// to protect s from being interpreted by the tabwriter. -// -// Note: writeString is only used to write Go tokens, literals, and -// comments, all of which must be written literally. Thus, it is correct -// to always set isLit = true. However, setting it explicitly only when -// needed (i.e., when we don't know that s contains no tabs or line breaks) -// avoids processing extra escape characters and reduces run time of the -// printer benchmark by up to 10%. 
-func (p *printer) writeString(pos token.Position, s string, isLit bool) { - if p.out.Column == 1 { - if p.Config.Mode&SourcePos != 0 { - p.writeLineDirective(pos) - } - p.writeIndent() - } - - if pos.IsValid() { - // update p.pos (if pos is invalid, continue with existing p.pos) - // Note: Must do this after handling line beginnings because - // writeIndent updates p.pos if there's indentation, but p.pos - // is the position of s. - p.pos = pos - } - - if isLit { - // Protect s such that is passes through the tabwriter - // unchanged. Note that valid Go programs cannot contain - // tabwriter.Escape bytes since they do not appear in legal - // UTF-8 sequences. - p.output = append(p.output, tabwriter.Escape) - } - - if debug { - p.output = append(p.output, fmt.Sprintf("/*%s*/", pos)...) // do not update p.pos! - } - p.output = append(p.output, s...) - - // update positions - nlines := 0 - var li int // index of last newline; valid if nlines > 0 - for i := 0; i < len(s); i++ { - // Raw string literals may contain any character except back quote (`). - if ch := s[i]; ch == '\n' || ch == '\f' { - // account for line break - nlines++ - li = i - // A line break inside a literal will break whatever column - // formatting is in place; ignore any further alignment through - // the end of the line. - p.endAlignment = true - } - } - p.pos.Offset += len(s) - if nlines > 0 { - p.pos.Line += nlines - p.out.Line += nlines - c := len(s) - li - p.pos.Column = c - p.out.Column = c - } else { - p.pos.Column += len(s) - p.out.Column += len(s) - } - - if isLit { - p.output = append(p.output, tabwriter.Escape) - } - - p.last = p.pos -} - -// writeCommentPrefix writes the whitespace before a comment. -// If there is any pending whitespace, it consumes as much of -// it as is likely to help position the comment nicely. -// pos is the comment position, next the position of the item -// after all pending comments, prev is the previous comment in -// a group of comments (or nil), and tok is the next token. 
-func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment, tok token.Token) { - if len(p.output) == 0 { - // the comment is the first item to be printed - don't write any whitespace - return - } - - if pos.IsValid() && pos.Filename != p.last.Filename { - // comment in a different file - separate with newlines - p.writeByte('\f', maxNewlines) - return - } - - if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') { - // comment on the same line as last item: - // separate with at least one separator - hasSep := false - if prev == nil { - // first comment of a comment group - j := 0 - for i, ch := range p.wsbuf { - switch ch { - case blank: - // ignore any blanks before a comment - p.wsbuf[i] = ignore - continue - case vtab: - // respect existing tabs - important - // for proper formatting of commented structs - hasSep = true - continue - case indent: - // apply pending indentation - continue - } - j = i - break - } - p.writeWhitespace(j) - } - // make sure there is at least one separator - if !hasSep { - sep := byte('\t') - if pos.Line == next.Line { - // next item is on the same line as the comment - // (which must be a /*-style comment): separate - // with a blank instead of a tab - sep = ' ' - } - p.writeByte(sep, 1) - } - - } else { - // comment on a different line: - // separate with at least one line break - droppedLinebreak := false - j := 0 - for i, ch := range p.wsbuf { - switch ch { - case blank, vtab: - // ignore any horizontal whitespace before line breaks - p.wsbuf[i] = ignore - continue - case indent: - // apply pending indentation - continue - case unindent: - // if this is not the last unindent, apply it - // as it is (likely) belonging to the last - // construct (e.g., a multi-line expression list) - // and is not part of closing a block - if i+1 < len(p.wsbuf) && p.wsbuf[i+1] == unindent { - continue - } - // if the next token is not a closing }, apply the unindent - // if it appears that the comment is aligned with the - // token; otherwise assume the unindent is part of a - // closing block and stop (this scenario appears with - // comments before a case label where the comments - // apply to the next case instead of the current one) - if tok != token.RBRACE && pos.Column == next.Column { - continue - } - case newline, formfeed: - p.wsbuf[i] = ignore - droppedLinebreak = prev == nil // record only if first comment of a group - } - j = i - break - } - p.writeWhitespace(j) - - // determine number of linebreaks before the comment - n := 0 - if pos.IsValid() && p.last.IsValid() { - n = pos.Line - p.last.Line - if n < 0 { // should never happen - n = 0 - } - } - - // at the package scope level only (p.indent == 0), - // add an extra newline if we dropped one before: - // this preserves a blank line before documentation - // comments at the package scope level (issue 2570) - if p.indent == 0 && droppedLinebreak { - n++ - } - - // make sure there is at least one line break - // if the previous comment was a line comment - if n == 0 && prev != nil && prev.Text[1] == '/' { - n = 1 - } - - if n > 0 { - // use formfeeds to break columns before a comment; - // this is analogous to using formfeeds to separate - // individual lines of /*-style comments - p.writeByte('\f', nlimit(n)) - } - } -} - -// Returns true if s contains only white space -// (only tabs and blanks can appear in the printer's context). 
-func isBlank(s string) bool { - for i := 0; i < len(s); i++ { - if s[i] > ' ' { - return false - } - } - return true -} - -// commonPrefix returns the common prefix of a and b. -func commonPrefix(a, b string) string { - i := 0 - for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') { - i++ - } - return a[0:i] -} - -// trimRight returns s with trailing whitespace removed. -func trimRight(s string) string { - return strings.TrimRightFunc(s, unicode.IsSpace) -} - -// stripCommonPrefix removes a common prefix from /*-style comment lines (unless no -// comment line is indented, all but the first line have some form of space prefix). -// The prefix is computed using heuristics such that is likely that the comment -// contents are nicely laid out after re-printing each line using the printer's -// current indentation. -func stripCommonPrefix(lines []string) { - if len(lines) <= 1 { - return // at most one line - nothing to do - } - // len(lines) > 1 - - // The heuristic in this function tries to handle a few - // common patterns of /*-style comments: Comments where - // the opening /* and closing */ are aligned and the - // rest of the comment text is aligned and indented with - // blanks or tabs, cases with a vertical "line of stars" - // on the left, and cases where the closing */ is on the - // same line as the last comment text. - - // Compute maximum common white prefix of all but the first, - // last, and blank lines, and replace blank lines with empty - // lines (the first line starts with /* and has no prefix). - // In cases where only the first and last lines are not blank, - // such as two-line comments, or comments where all inner lines - // are blank, consider the last line for the prefix computation - // since otherwise the prefix would be empty. - // - // Note that the first and last line are never empty (they - // contain the opening /* and closing */ respectively) and - // thus they can be ignored by the blank line check. - prefix := "" - prefixSet := false - if len(lines) > 2 { - for i, line := range lines[1 : len(lines)-1] { - if isBlank(line) { - lines[1+i] = "" // range starts with lines[1] - } else { - if !prefixSet { - prefix = line - prefixSet = true - } - prefix = commonPrefix(prefix, line) - } - - } - } - // If we don't have a prefix yet, consider the last line. - if !prefixSet { - line := lines[len(lines)-1] - prefix = commonPrefix(line, line) - } - - /* - * Check for vertical "line of stars" and correct prefix accordingly. - */ - lineOfStars := false - if p, _, ok := strings.Cut(prefix, "*"); ok { - // remove trailing blank from prefix so stars remain aligned - prefix = strings.TrimSuffix(p, " ") - lineOfStars = true - } else { - // No line of stars present. - // Determine the white space on the first line after the /* - // and before the beginning of the comment text, assume two - // blanks instead of the /* unless the first character after - // the /* is a tab. If the first comment line is empty but - // for the opening /*, assume up to 3 blanks or a tab. This - // whitespace may be found as suffix in the common prefix. 
- first := lines[0] - if isBlank(first[2:]) { - // no comment text on the first line: - // reduce prefix by up to 3 blanks or a tab - // if present - this keeps comment text indented - // relative to the /* and */'s if it was indented - // in the first place - i := len(prefix) - for n := 0; n < 3 && i > 0 && prefix[i-1] == ' '; n++ { - i-- - } - if i == len(prefix) && i > 0 && prefix[i-1] == '\t' { - i-- - } - prefix = prefix[0:i] - } else { - // comment text on the first line - suffix := make([]byte, len(first)) - n := 2 // start after opening /* - for n < len(first) && first[n] <= ' ' { - suffix[n] = first[n] - n++ - } - if n > 2 && suffix[2] == '\t' { - // assume the '\t' compensates for the /* - suffix = suffix[2:n] - } else { - // otherwise assume two blanks - suffix[0], suffix[1] = ' ', ' ' - suffix = suffix[0:n] - } - // Shorten the computed common prefix by the length of - // suffix, if it is found as suffix of the prefix. - prefix = strings.TrimSuffix(prefix, string(suffix)) - } - } - - // Handle last line: If it only contains a closing */, align it - // with the opening /*, otherwise align the text with the other - // lines. - last := lines[len(lines)-1] - closing := "*/" - before, _, _ := strings.Cut(last, closing) // closing always present - if isBlank(before) { - // last line only contains closing */ - if lineOfStars { - closing = " */" // add blank to align final star - } - lines[len(lines)-1] = prefix + closing - } else { - // last line contains more comment text - assume - // it is aligned like the other lines and include - // in prefix computation - prefix = commonPrefix(prefix, last) - } - - // Remove the common prefix from all but the first and empty lines. - for i, line := range lines { - if i > 0 && line != "" { - lines[i] = line[len(prefix):] - } - } -} - -func (p *printer) writeComment(comment *ast.Comment) { - text := comment.Text - pos := p.posFor(comment.Pos()) - - const linePrefix = "//line " - if strings.HasPrefix(text, linePrefix) && (!pos.IsValid() || pos.Column == 1) { - // Possibly a //-style line directive. - // Suspend indentation temporarily to keep line directive valid. - defer func(indent int) { p.indent = indent }(p.indent) - p.indent = 0 - } - - // shortcut common case of //-style comments - if text[1] == '/' { - if constraint.IsGoBuild(text) { - p.goBuild = append(p.goBuild, len(p.output)) - } else if constraint.IsPlusBuild(text) { - p.plusBuild = append(p.plusBuild, len(p.output)) - } - p.writeString(pos, trimRight(text), true) - return - } - - // for /*-style comments, print line by line and let the - // write function take care of the proper indentation - lines := strings.Split(text, "\n") - - // The comment started in the first column but is going - // to be indented. For an idempotent result, add indentation - // to all lines such that they look like they were indented - // before - this will make sure the common prefix computation - // is the same independent of how many times formatting is - // applied (was issue 1835). 
- if pos.IsValid() && pos.Column == 1 && p.indent > 0 { - for i, line := range lines[1:] { - lines[1+i] = " " + line - } - } - - stripCommonPrefix(lines) - - // write comment lines, separated by formfeed, - // without a line break after the last line - for i, line := range lines { - if i > 0 { - p.writeByte('\f', 1) - pos = p.pos - } - if len(line) > 0 { - p.writeString(pos, trimRight(line), true) - } - } -} - -// writeCommentSuffix writes a line break after a comment if indicated -// and processes any leftover indentation information. If a line break -// is needed, the kind of break (newline vs formfeed) depends on the -// pending whitespace. The writeCommentSuffix result indicates if a -// newline was written or if a formfeed was dropped from the whitespace -// buffer. -func (p *printer) writeCommentSuffix(needsLinebreak bool) (wroteNewline, droppedFF bool) { - for i, ch := range p.wsbuf { - switch ch { - case blank, vtab: - // ignore trailing whitespace - p.wsbuf[i] = ignore - case indent, unindent: - // don't lose indentation information - case newline, formfeed: - // if we need a line break, keep exactly one - // but remember if we dropped any formfeeds - if needsLinebreak { - needsLinebreak = false - wroteNewline = true - } else { - if ch == formfeed { - droppedFF = true - } - p.wsbuf[i] = ignore - } - } - } - p.writeWhitespace(len(p.wsbuf)) - - // make sure we have a line break - if needsLinebreak { - p.writeByte('\n', 1) - wroteNewline = true - } - - return -} - -// containsLinebreak reports whether the whitespace buffer contains any line breaks. -func (p *printer) containsLinebreak() bool { - for _, ch := range p.wsbuf { - if ch == newline || ch == formfeed { - return true - } - } - return false -} - -// intersperseComments consumes all comments that appear before the next token -// tok and prints it together with the buffered whitespace (i.e., the whitespace -// that needs to be written before the next token). A heuristic is used to mix -// the comments and whitespace. The intersperseComments result indicates if a -// newline was written or if a formfeed was dropped from the whitespace buffer. -func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) { - var last *ast.Comment - for p.commentBefore(next) { - list := p.comment.List - changed := false - if p.lastTok != token.IMPORT && // do not rewrite cgo's import "C" comments - p.posFor(p.comment.Pos()).Column == 1 && - p.posFor(p.comment.End()+1) == next { - // Unindented comment abutting next token position: - // a top-level doc comment. - list = formatDocComment(list) - changed = true - - if len(p.comment.List) > 0 && len(list) == 0 { - // The doc comment was removed entirely. - // Keep preceding whitespace. - p.writeCommentPrefix(p.posFor(p.comment.Pos()), next, last, tok) - // Change print state to continue at next. - p.pos = next - p.last = next - // There can't be any more comments. - p.nextComment() - return p.writeCommentSuffix(false) - } - } - for _, c := range list { - p.writeCommentPrefix(p.posFor(c.Pos()), next, last, tok) - p.writeComment(c) - last = c - } - // In case list was rewritten, change print state to where - // the original list would have ended. 
- if len(p.comment.List) > 0 && changed { - last = p.comment.List[len(p.comment.List)-1] - p.pos = p.posFor(last.End()) - p.last = p.pos - } - p.nextComment() - } - - if last != nil { - // If the last comment is a /*-style comment and the next item - // follows on the same line but is not a comma, and not a "closing" - // token immediately following its corresponding "opening" token, - // add an extra separator unless explicitly disabled. Use a blank - // as separator unless we have pending linebreaks, they are not - // disabled, and we are outside a composite literal, in which case - // we want a linebreak (issue 15137). - // TODO(gri) This has become overly complicated. We should be able - // to track whether we're inside an expression or statement and - // use that information to decide more directly. - needsLinebreak := false - if p.mode&noExtraBlank == 0 && - last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line && - tok != token.COMMA && - (tok != token.RPAREN || p.prevOpen == token.LPAREN) && - (tok != token.RBRACK || p.prevOpen == token.LBRACK) { - if p.containsLinebreak() && p.mode&noExtraLinebreak == 0 && p.level == 0 { - needsLinebreak = true - } else { - p.writeByte(' ', 1) - } - } - // Ensure that there is a line break after a //-style comment, - // before EOF, and before a closing '}' unless explicitly disabled. - if last.Text[1] == '/' || - tok == token.EOF || - tok == token.RBRACE && p.mode&noExtraLinebreak == 0 { - needsLinebreak = true - } - return p.writeCommentSuffix(needsLinebreak) - } - - // no comment was written - we should never reach here since - // intersperseComments should not be called in that case - p.internalError("intersperseComments called without pending comments") - return -} - -// writeWhitespace writes the first n whitespace entries. -func (p *printer) writeWhitespace(n int) { - // write entries - for i := 0; i < n; i++ { - switch ch := p.wsbuf[i]; ch { - case ignore: - // ignore! - case indent: - p.indent++ - case unindent: - p.indent-- - if p.indent < 0 { - p.internalError("negative indentation:", p.indent) - p.indent = 0 - } - case newline, formfeed: - // A line break immediately followed by a "correcting" - // unindent is swapped with the unindent - this permits - // proper label positioning. If a comment is between - // the line break and the label, the unindent is not - // part of the comment whitespace prefix and the comment - // will be positioned correctly indented. - if i+1 < n && p.wsbuf[i+1] == unindent { - // Use a formfeed to terminate the current section. - // Otherwise, a long label name on the next line leading - // to a wide column may increase the indentation column - // of lines before the label; effectively leading to wrong - // indentation. - p.wsbuf[i], p.wsbuf[i+1] = unindent, formfeed - i-- // do it again - continue - } - fallthrough - default: - p.writeByte(byte(ch), 1) - } - } - - // shift remaining entries down - l := copy(p.wsbuf, p.wsbuf[n:]) - p.wsbuf = p.wsbuf[:l] -} - -// ---------------------------------------------------------------------------- -// Printing interface - -// nlimit limits n to maxNewlines. -func nlimit(n int) int { - return min(n, maxNewlines) -} - -func mayCombine(prev token.Token, next byte) (b bool) { - switch prev { - case token.INT: - b = next == '.' // 1. 
- case token.ADD: - b = next == '+' // ++ - case token.SUB: - b = next == '-' // -- - case token.QUO: - b = next == '*' // /* - case token.LSS: - b = next == '-' || next == '<' // <- or << - case token.AND: - b = next == '&' || next == '^' // && or &^ - } - return -} - -func (p *printer) setPos(pos token.Pos) { - if pos.IsValid() { - p.pos = p.posFor(pos) // accurate position of next item - } -} - -// print prints a list of "items" (roughly corresponding to syntactic -// tokens, but also including whitespace and formatting information). -// It is the only print function that should be called directly from -// any of the AST printing functions in nodes.go. -// -// Whitespace is accumulated until a non-whitespace token appears. Any -// comments that need to appear before that token are printed first, -// taking into account the amount and structure of any pending white- -// space for best comment placement. Then, any leftover whitespace is -// printed, followed by the actual token. -func (p *printer) print(args ...any) { - for _, arg := range args { - // information about the current arg - var data string - var isLit bool - var impliedSemi bool // value for p.impliedSemi after this arg - - // record previous opening token, if any - switch p.lastTok { - case token.ILLEGAL: - // ignore (white space) - case token.LPAREN, token.LBRACK: - p.prevOpen = p.lastTok - default: - // other tokens followed any opening token - p.prevOpen = token.ILLEGAL - } - - switch x := arg.(type) { - case pmode: - // toggle printer mode - p.mode ^= x - continue - - case whiteSpace: - if x == ignore { - // don't add ignore's to the buffer; they - // may screw up "correcting" unindents (see - // LabeledStmt) - continue - } - i := len(p.wsbuf) - if i == cap(p.wsbuf) { - // Whitespace sequences are very short so this should - // never happen. Handle gracefully (but possibly with - // bad comment placement) if it does happen. - p.writeWhitespace(i) - i = 0 - } - p.wsbuf = p.wsbuf[0 : i+1] - p.wsbuf[i] = x - if x == newline || x == formfeed { - // newlines affect the current state (p.impliedSemi) - // and not the state after printing arg (impliedSemi) - // because comments can be interspersed before the arg - // in this case - p.impliedSemi = false - } - p.lastTok = token.ILLEGAL - continue - - case *ast.Ident: - data = x.Name - impliedSemi = true - p.lastTok = token.IDENT - - case *ast.BasicLit: - data = x.Value - isLit = true - impliedSemi = true - p.lastTok = x.Kind - - case token.Token: - s := x.String() - if mayCombine(p.lastTok, s[0]) { - // the previous and the current token must be - // separated by a blank otherwise they combine - // into a different incorrect token sequence - // (except for token.INT followed by a '.' 
this - // should never happen because it is taken care - // of via binary expression formatting) - if len(p.wsbuf) != 0 { - p.internalError("whitespace buffer not empty") - } - p.wsbuf = p.wsbuf[0:1] - p.wsbuf[0] = ' ' - } - data = s - // some keywords followed by a newline imply a semicolon - switch x { - case token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN, - token.INC, token.DEC, token.RPAREN, token.RBRACK, token.RBRACE: - impliedSemi = true - } - p.lastTok = x - - case string: - // incorrect AST - print error message - data = x - isLit = true - impliedSemi = true - p.lastTok = token.STRING - - default: - fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", arg, arg) - panic("go/printer type") - } - // data != "" - - next := p.pos // estimated/accurate position of next item - wroteNewline, droppedFF := p.flush(next, p.lastTok) - - // intersperse extra newlines if present in the source and - // if they don't cause extra semicolons (don't do this in - // flush as it will cause extra newlines at the end of a file) - if !p.impliedSemi { - n := nlimit(next.Line - p.pos.Line) - // don't exceed maxNewlines if we already wrote one - if wroteNewline && n == maxNewlines { - n = maxNewlines - 1 - } - if n > 0 { - ch := byte('\n') - if droppedFF { - ch = '\f' // use formfeed since we dropped one before - } - p.writeByte(ch, n) - impliedSemi = false - } - } - - // the next token starts now - record its line number if requested - if p.linePtr != nil { - *p.linePtr = p.out.Line - p.linePtr = nil - } - - p.writeString(next, data, isLit) - p.impliedSemi = impliedSemi - } -} - -// flush prints any pending comments and whitespace occurring textually -// before the position of the next token tok. The flush result indicates -// if a newline was written or if a formfeed was dropped from the whitespace -// buffer. -func (p *printer) flush(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) { - if p.commentBefore(next) { - // if there are comments before the next item, intersperse them - wroteNewline, droppedFF = p.intersperseComments(next, tok) - } else { - // otherwise, write any leftover whitespace - p.writeWhitespace(len(p.wsbuf)) - } - return -} - -// getDoc returns the ast.CommentGroup associated with n, if any. 
-func getDoc(n ast.Node) *ast.CommentGroup { - switch n := n.(type) { - case *ast.Field: - return n.Doc - case *ast.ImportSpec: - return n.Doc - case *ast.ValueSpec: - return n.Doc - case *ast.TypeSpec: - return n.Doc - case *ast.GenDecl: - return n.Doc - case *ast.FuncDecl: - return n.Doc - case *ast.File: - return n.Doc - } - return nil -} - -func getLastComment(n ast.Node) *ast.CommentGroup { - switch n := n.(type) { - case *ast.Field: - return n.Comment - case *ast.ImportSpec: - return n.Comment - case *ast.ValueSpec: - return n.Comment - case *ast.TypeSpec: - return n.Comment - case *ast.GenDecl: - if len(n.Specs) > 0 { - return getLastComment(n.Specs[len(n.Specs)-1]) - } - case *ast.File: - if len(n.Comments) > 0 { - return n.Comments[len(n.Comments)-1] - } - } - return nil -} - -func (p *printer) printNode(node any) error { - // unpack *CommentedNode, if any - var comments []*ast.CommentGroup - if cnode, ok := node.(*CommentedNode); ok { - node = cnode.Node - comments = cnode.Comments - } - - if comments != nil { - // commented node - restrict comment list to relevant range - n, ok := node.(ast.Node) - if !ok { - goto unsupported - } - beg := n.Pos() - end := n.End() - // if the node has associated documentation, - // include that commentgroup in the range - // (the comment list is sorted in the order - // of the comment appearance in the source code) - if doc := getDoc(n); doc != nil { - beg = doc.Pos() - } - if com := getLastComment(n); com != nil { - if e := com.End(); e > end { - end = e - } - } - // token.Pos values are global offsets, we can - // compare them directly - i := 0 - for i < len(comments) && comments[i].End() < beg { - i++ - } - j := i - for j < len(comments) && comments[j].Pos() < end { - j++ - } - if i < j { - p.comments = comments[i:j] - } - } else if n, ok := node.(*ast.File); ok { - // use ast.File comments, if any - p.comments = n.Comments - } - - // if there are no comments, use node comments - p.useNodeComments = p.comments == nil - - // get comments ready for use - p.nextComment() - - p.print(pmode(0)) - - // format node - switch n := node.(type) { - case ast.Expr: - p.expr(n) - case ast.Stmt: - // A labeled statement will un-indent to position the label. - // Set p.indent to 1 so we don't get indent "underflow". - if _, ok := n.(*ast.LabeledStmt); ok { - p.indent = 1 - } - p.stmt(n, false, false) - case ast.Decl: - p.decl(n) - case ast.Spec: - p.spec(n, 1, false, token.EOF) - case []ast.Stmt: - // A labeled statement will un-indent to position the label. - // Set p.indent to 1 so we don't get indent "underflow". - for _, s := range n { - if _, ok := s.(*ast.LabeledStmt); ok { - p.indent = 1 - } - } - p.stmtList(n, 0, false) - case []ast.Decl: - p.declList(n) - case *ast.File: - p.file(n) - default: - goto unsupported - } - - return p.sourcePosErr - -unsupported: - return fmt.Errorf("go/printer: unsupported node type %T", node) -} - -// ---------------------------------------------------------------------------- -// Trimmer - -// A trimmer is an io.Writer filter for stripping tabwriter.Escape -// characters, trailing blanks and tabs, and for converting formfeed -// and vtab characters into newlines and htabs (in case no tabwriter -// is used). Text bracketed by tabwriter.Escape characters is passed -// through unchanged. -type trimmer struct { - output io.Writer - state int - space []byte -} - -// trimmer is implemented as a state machine. 
-// It can be in one of the following states: -const ( - inSpace = iota // inside space - inEscape // inside text bracketed by tabwriter.Escapes - inText // inside text -) - -func (p *trimmer) resetSpace() { - p.state = inSpace - p.space = p.space[0:0] -} - -// Design note: It is tempting to eliminate extra blanks occurring in -// whitespace in this function as it could simplify some -// of the blanks logic in the node printing functions. -// However, this would mess up any formatting done by -// the tabwriter. - -var aNewline = []byte("\n") - -func (p *trimmer) Write(data []byte) (n int, err error) { - // invariants: - // p.state == inSpace: - // p.space is unwritten - // p.state == inEscape, inText: - // data[m:n] is unwritten - m := 0 - var b byte - for n, b = range data { - if b == '\v' { - b = '\t' // convert to htab - } - switch p.state { - case inSpace: - switch b { - case '\t', ' ': - p.space = append(p.space, b) - case '\n', '\f': - p.resetSpace() // discard trailing space - _, err = p.output.Write(aNewline) - case tabwriter.Escape: - _, err = p.output.Write(p.space) - p.state = inEscape - m = n + 1 // +1: skip tabwriter.Escape - default: - _, err = p.output.Write(p.space) - p.state = inText - m = n - } - case inEscape: - if b == tabwriter.Escape { - _, err = p.output.Write(data[m:n]) - p.resetSpace() - } - case inText: - switch b { - case '\t', ' ': - _, err = p.output.Write(data[m:n]) - p.resetSpace() - p.space = append(p.space, b) - case '\n', '\f': - _, err = p.output.Write(data[m:n]) - p.resetSpace() - if err == nil { - _, err = p.output.Write(aNewline) - } - case tabwriter.Escape: - _, err = p.output.Write(data[m:n]) - p.state = inEscape - m = n + 1 // +1: skip tabwriter.Escape - } - default: - panic("unreachable") - } - if err != nil { - return - } - } - n = len(data) - - switch p.state { - case inEscape, inText: - _, err = p.output.Write(data[m:n]) - p.resetSpace() - } - - return -} - -// ---------------------------------------------------------------------------- -// Public interface - -// A Mode value is a set of flags (or 0). They control printing. -type Mode uint - -const ( - RawFormat Mode = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored - TabIndent // use tabs for indentation independent of UseSpaces - UseSpaces // use spaces instead of tabs for alignment - SourcePos // emit //line directives to preserve original source positions -) - -// The mode below is not included in printer's public API because -// editing code text is deemed out of scope. Because this mode is -// unexported, it's also possible to modify or remove it based on -// the evolving needs of go/format and cmd/gofmt without breaking -// users. See discussion in CL 240683. -const ( - // normalizeNumbers means to canonicalize number - // literal prefixes and exponents while printing. - // - // This value is known in and used by go/format and cmd/gofmt. - // It is currently more convenient and performant for those - // packages to apply number normalization during printing, - // rather than by modifying the AST in advance. - normalizeNumbers Mode = 1 << 30 -) - -// A Config node controls the output of Fprint. -type Config struct { - Mode Mode // default: 0 - Tabwidth int // default: 8 - Indent int // default: 0 (all code is indented at least by this much) - - ExcludeFunctions map[string]bool -} - -var printerPool = sync.Pool{ - New: func() any { - return &printer{ - // Whitespace sequences are short. 
- wsbuf: make([]whiteSpace, 0, 16), - // We start the printer with a 16K output buffer, which is currently - // larger than about 80% of Go files in the standard library. - output: make([]byte, 0, 16<<10), - } - }, -} - -func newPrinter(cfg *Config, pkg *packages.Package, nodeSizes map[ast.Node]int) *printer { - p := printerPool.Get().(*printer) - *p = printer{ - Config: *cfg, - pkg: pkg, - fset: pkg.Fset, - pos: token.Position{Line: 1, Column: 1}, - out: token.Position{Line: 1, Column: 1}, - wsbuf: p.wsbuf[:0], - nodeSizes: nodeSizes, - cachedPos: -1, - output: p.output[:0], - } - return p -} - -func (p *printer) free() { - // Hard limit on buffer size; see https://golang.org/issue/23199. - if cap(p.output) > 64<<10 { - return - } - - printerPool.Put(p) -} - -// fprint implements Fprint and takes a nodesSizes map for setting up the printer state. -func (cfg *Config) fprint(output io.Writer, pkg *packages.Package, node any, nodeSizes map[ast.Node]int) (err error) { - // print node - p := newPrinter(cfg, pkg, nodeSizes) - defer p.free() - if err = p.printNode(node); err != nil { - return - } - // print outstanding comments - p.impliedSemi = false // EOF acts like a newline - p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF) - - // output is buffered in p.output now. - // fix //go:build and // +build comments if needed. - p.fixGoBuildLines() - - // redirect output through a trimmer to eliminate trailing whitespace - // (Input to a tabwriter must be untrimmed since trailing tabs provide - // formatting information. The tabwriter could provide trimming - // functionality but no tabwriter is used when RawFormat is set.) - output = &trimmer{output: output} - - // redirect output through a tabwriter if necessary - if cfg.Mode&RawFormat == 0 { - minwidth := cfg.Tabwidth - - padchar := byte('\t') - if cfg.Mode&UseSpaces != 0 { - padchar = ' ' - } - - twmode := tabwriter.DiscardEmptyColumns - if cfg.Mode&TabIndent != 0 { - minwidth = 0 - twmode |= tabwriter.TabIndent - } - - output = tabwriter.NewWriter(output, minwidth, cfg.Tabwidth, 1, padchar, twmode) - } - - // write printer result via tabwriter/trimmer to output - if _, err = output.Write(p.output); err != nil { - return - } - - // flush tabwriter, if any - if tw, _ := output.(*tabwriter.Writer); tw != nil { - err = tw.Flush() - } - - return -} - -// A CommentedNode bundles an AST node and corresponding comments. -// It may be provided as argument to any of the [Fprint] functions. -type CommentedNode struct { - Node any // *ast.File, or ast.Expr, ast.Decl, ast.Spec, or ast.Stmt - Comments []*ast.CommentGroup -} - -// Fprint "pretty-prints" an AST node to output for a given configuration cfg. -// Position information is interpreted relative to the file set fset. -// The node type must be *[ast.File], *[CommentedNode], [][ast.Decl], [][ast.Stmt], -// or assignment-compatible to [ast.Expr], [ast.Decl], [ast.Spec], or [ast.Stmt]. -func (cfg *Config) Fprint(output io.Writer, pkg *packages.Package, node any) error { - return cfg.fprint(output, pkg, node, make(map[ast.Node]int)) -} - -// Fprint "pretty-prints" an AST node to output. -// It calls [Config.Fprint] with default settings. -// Note that gofmt uses tabs for indentation but spaces for alignment; -// use format.Node (package go/format) for output that matches gofmt. 
-func Fprint(output io.Writer, pkg *packages.Package, node any) error { - return (&Config{Tabwidth: 8}).Fprint(output, pkg, node) -} diff --git a/gpu/gosl/slrand/README.md b/gpu/gosl/slrand/README.md deleted file mode 100644 index 15e6ca02ff..0000000000 --- a/gpu/gosl/slrand/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# slrand - -This package contains WGSL header files and matching Go code for various random number generation (RNG) functions. The `gosl` tool will automatically copy the `slrand.wgsl` self-contained file into the destination `shaders` directory if the Go code contains `slrand.` prefix. Here's how you include: - -```Go -//gosl:wgsl mycode -// #include "slrand.wgsl" -//gosl:end mycode -``` - -`slrand` uses the [Philox2x32](https://github.com/DEShawResearch/random123) algorithm which is also available on CUDA on their [cuRNG](https://docs.nvidia.com/cuda/curand/host-api-overview.html) and in [Tensorflow](https://www.tensorflow.org/guide/random_numbers#general). A recent [evaluation](https://www.mdpi.com/2079-3197/9/12/142#B69-computation-09-00142) showed it to be the fastest GPU RNG, which also passes the standards for statistical quality (e.g., BigCrush). It is a counter based RNG, [CBRNG](https://en.wikipedia.org/wiki/Counter-based_random_number_generator_(CBRNG), where the random number is a direct function of the input state, with no other internal state. For a useful discussion of other alternatives, see [reddit cpp thread](https://www.reddit.com/r/cpp/comments/u3cnkk/old_rand_method_faster_than_new_alternatives/). The code is based on the D.E. Shaw [github](https://github.com/DEShawResearch/random123/blob/main/include/Random123/philox.h) implementation. - -The key advantage of this algorithm is its *stateless* nature, where the result is a deterministic but highly nonlinear function of its two inputs: -``` - fn Philox2x32(counter: vec2, key: u32) -> vec2; -``` -where the WGSL `vec2` type is 2 `uint32` 32-bit unsigned integers. For GPU usage, the `key` is always set to the unique element being processed (e.g., the index of the data structure being updated), ensuring that different numbers are generated for each such element, and the `counter` should be configured as a shared global value that is incremented after each iteration of computation. - - - - For example, if 4 RNG calls happen within a given set of GPU code, each thread starts with the same starting `counter` value, which is passed around as a local `vec2` variable and incremented locally for each RNG. Then, after all threads have been performed, the shared starting `counter` is incremented using `CounterAdd` by 4. - -The `Float` and `Uint32` etc wrapper functions around Philox2x32 will automatically increment the counter var passed to it, using the `CounterIncr()` method that manages the two 32 bit numbers as if they are a full 64 bit uint. - -The `slrand.Counter` struct provides a 16-byte aligned type for storing and incrementing the global counter. The `Seed` method initializes the starting counter value by setting the Hi uint32 value to given seed, which thus provides a random sequence length of over 4 billion numbers within the Lo uint32 counter -- use more widely spaced seed values for longer unique sequences. - -`gosl` will automatically translate the Go versions of the `slrand` package functions into their WGSL equivalents. 
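
As a quick illustration of the counter / `funcIndex` / `key` pattern described above, here is a minimal Go sketch using the `slrand.Counter` and `Float32*` signatures from `slrand.go` in this diff (the `cogentcore.org/core/gpu/gosl/slrand` import path is inferred from the file locations shown here; the loop bounds and seed are arbitrary):

```Go
package main

import (
	"fmt"

	"cogentcore.org/core/gpu/gosl/slrand"
)

func main() {
	// Global shared counter: seed once, then advance after each full pass.
	var ctr slrand.Counter
	ctr.Seed(42)

	const nFuncs = 2 // max funcIndex+1 used per item within one pass

	// One pass over items: key = index of the item being processed,
	// funcIndex = which RNG call within the pass, so every item gets
	// its own deterministic stream regardless of processing order.
	for key := uint32(0); key < 4; key++ {
		u := slrand.Float32(ctr.Counter, 0, key)        // funcIndex 0: uniform (0,1)
		r := slrand.Float32Range11(ctr.Counter, 1, key) // funcIndex 1: uniform [-1,1]
		fmt.Println(key, u, r)
	}

	// Advance the shared counter by the number of possible RNG calls
	// per item, so the next pass produces fresh, reproducible values.
	ctr.Add(nFuncs)
}
```

Because the numbers depend only on the counter and key inputs, the same values come back on the CPU and GPU sides, which is what the examples referenced below rely on.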
- -See the [axon](https://github.com/emer/gosl/v2/tree/main/examples/axon) and [rand](https://github.com/emer/gosl/v2/tree/main/examples/rand) examples for how to use in combined Go / GPU code. In the axon example, the `slrand.Counter` is added to the `Time` context struct, and incremented after each cycle based on the number of random numbers generated for a single pass through the code, as determined by the parameter settings. The index of each neuron being processed is used as the `key`, which is consistent in CPU and GPU versions. Within each cycle, a *local* arg variable is incremented on each GPU processor as the computation unfolds, passed by reference after the top-level, so it updates as each RNG call is made within each pass. - -Critically, these examples show that the CPU and GPU code produce identical random number sequences, which is otherwise quite difficult to achieve without this specific form of RNG. - -# Implementational details - -Unfortunately, vulkan `glslang` does not support 64 bit integers, even though the shader language model has somehow been updated to support them: https://github.com/KhronosGroup/glslang/issues/2965 -- https://github.com/microsoft/DirectXShaderCompiler/issues/2067. This would also greatly speed up the impl: https://github.com/microsoft/DirectXShaderCompiler/issues/2821. - -The result is that we have to use the slower version of the MulHiLo algorithm using only 32 bit uints. - - - diff --git a/gpu/gosl/slrand/slrand.go b/gpu/gosl/slrand/slrand.go deleted file mode 100644 index 05453cdb60..0000000000 --- a/gpu/gosl/slrand/slrand.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slrand - -import ( - "cogentcore.org/core/gpu/gosl/sltype" - "cogentcore.org/core/math32" -) - -// These are Go versions of the same Philox2x32 based random number generator -// functions available in .WGSL. - -// Philox2x32round does one round of updating of the counter. -func Philox2x32round(counter uint64, key uint32) uint64 { - ctr := sltype.Uint64ToLoHi(counter) - mul := sltype.Uint64ToLoHi(sltype.Uint32Mul64(0xD256D193, ctr.X)) - ctr.X = mul.Y ^ key ^ ctr.Y - ctr.Y = mul.X - return sltype.Uint64FromLoHi(ctr) -} - -// Philox2x32bumpkey does one round of updating of the key -func Philox2x32bumpkey(key uint32) uint32 { - return key + 0x9E3779B9 -} - -// Philox2x32 implements the stateless counter-based RNG algorithm -// returning a random number as two uint32 values, given a -// counter and key input that determine the result. -func Philox2x32(counter uint64, key uint32) sltype.Uint32Vec2 { - // this is an unrolled loop of 10 updates based on initial counter and key, - // which produces the random deviation deterministically based on these inputs. 
- counter = Philox2x32round(counter, key) // 1 - key = Philox2x32bumpkey(key) - counter = Philox2x32round(counter, key) // 2 - key = Philox2x32bumpkey(key) - counter = Philox2x32round(counter, key) // 3 - key = Philox2x32bumpkey(key) - counter = Philox2x32round(counter, key) // 4 - key = Philox2x32bumpkey(key) - counter = Philox2x32round(counter, key) // 5 - key = Philox2x32bumpkey(key) - counter = Philox2x32round(counter, key) // 6 - key = Philox2x32bumpkey(key) - counter = Philox2x32round(counter, key) // 7 - key = Philox2x32bumpkey(key) - counter = Philox2x32round(counter, key) // 8 - key = Philox2x32bumpkey(key) - counter = Philox2x32round(counter, key) // 9 - key = Philox2x32bumpkey(key) - - return sltype.Uint64ToLoHi(Philox2x32round(counter, key)) // 10 -} - -//////////////////////////////////////////////////////////// -// Methods below provide a standard interface with more -// readable names, mapping onto the Go rand methods. -// -// They assume a global shared counter, which is then -// incremented by a function index, defined for each function -// consuming random numbers that _could_ be called within a parallel -// processing loop. At the end of the loop, the global counter should -// be incremented by the total possible number of such functions. -// This results in fully resproducible results, invariant to -// specific processing order, and invariant to whether any one function -// actually calls the random number generator. - -// Uint32Vec2 returns two uniformly distributed 32 unsigned integers, -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -func Uint32Vec2(counter uint64, funcIndex uint32, key uint32) sltype.Uint32Vec2 { - return Philox2x32(sltype.Uint64Add32(counter, funcIndex), key) -} - -// Uint32 returns a uniformly distributed 32 unsigned integer, -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -func Uint32(counter uint64, funcIndex uint32, key uint32) uint32 { - return Philox2x32(sltype.Uint64Add32(counter, funcIndex), key).X -} - -// Float32Vec2 returns two uniformly distributed float32 values in range (0,1), -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -func Float32Vec2(counter uint64, funcIndex uint32, key uint32) sltype.Float32Vec2 { - return sltype.Uint32ToFloat32Vec2(Uint32Vec2(counter, funcIndex, key)) -} - -// Float32 returns a uniformly distributed float32 value in range (0,1), -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -func Float32(counter uint64, funcIndex uint32, key uint32) float32 { - return sltype.Uint32ToFloat32(Uint32(counter, funcIndex, key)) -} - -// Float32Range11Vec2 returns two uniformly distributed float32 values in range [-1,1], -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. 
-func Float32Range11Vec2(counter uint64, funcIndex uint32, key uint32) sltype.Float32Vec2 { - return sltype.Uint32ToFloat32Vec2(Uint32Vec2(counter, funcIndex, key)) -} - -// Float32Range11 returns a uniformly distributed float32 value in range [-1,1], -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -func Float32Range11(counter uint64, funcIndex uint32, key uint32) float32 { - return sltype.Uint32ToFloat32Range11(Uint32(counter, funcIndex, key)) -} - -// BoolP returns a bool true value with probability p -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -func BoolP(counter uint64, funcIndex uint32, key uint32, p float32) bool { - return (Float32(counter, funcIndex, key) < p) -} - -func SincosPi(x float32) sltype.Float32Vec2 { - const PIf = 3.1415926535897932 - var r sltype.Float32Vec2 - r.Y, r.X = math32.Sincos(PIf * x) - return r -} - -// Float32NormVec2 returns two random float32 numbers -// distributed according to the normal, Gaussian distribution -// with zero mean and unit variance. -// This is done very efficiently using the Box-Muller algorithm -// that consumes two random 32 bit uint values. -// Uses given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -func Float32NormVec2(counter uint64, funcIndex uint32, key uint32) sltype.Float32Vec2 { - ur := Uint32Vec2(counter, funcIndex, key) - f := SincosPi(sltype.Uint32ToFloat32Range11(ur.X)) - r := math32.Sqrt(-2.0 * math32.Log(sltype.Uint32ToFloat32(ur.Y))) // guaranteed to avoid 0. - return f.MulScalar(r) -} - -// Float32Norm returns a random float32 number -// distributed according to the normal, Gaussian distribution -// with zero mean and unit variance. -// Uses given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -func Float32Norm(counter uint64, funcIndex uint32, key uint32) float32 { - return Float32Vec2(counter, funcIndex, key).X -} - -// Uint32N returns a uint32 in the range [0,N). -// Uses given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -func Uint32N(counter uint64, funcIndex uint32, key uint32, n uint32) uint32 { - v := Float32(counter, funcIndex, key) - return uint32(v * float32(n)) -} - -// Counter is used for storing the random counter using aligned 16 byte storage, -// with convenience methods for typical use cases. -// It retains a copy of the last Seed value, which is applied to the Hi uint32 value. -type Counter struct { - // Counter value - Counter uint64 - - // last seed value set by Seed method, restored by Reset() - HiSeed uint32 - - pad uint32 -} - -// Reset resets counter to last set Seed state -func (ct *Counter) Reset() { - ct.Counter = sltype.Uint64FromLoHi(sltype.Uint32Vec2{0, ct.HiSeed}) -} - -// Seed sets the Hi uint32 value from given seed, saving it in HiSeed field. -// Each increment in seed generates a unique sequence of over 4 billion numbers, -// so it is reasonable to just use incremental values there, but more widely -// spaced numbers will result in longer unique sequences. -// Resets Lo to 0. 
-// This same seed will be restored during Reset -func (ct *Counter) Seed(seed uint32) { - ct.HiSeed = seed - ct.Reset() -} - -// Add increments the counter by given amount. -// Call this after completing a pass of computation -// where the value passed here is the max of funcIndex+1 -// used for any possible random calls during that pass. -func (ct *Counter) Add(inc uint32) { - ct.Counter = sltype.Uint64Add32(ct.Counter, inc) -} diff --git a/gpu/gosl/slrand/slrand.wgsl b/gpu/gosl/slrand/slrand.wgsl deleted file mode 100644 index 820e7bdf62..0000000000 --- a/gpu/gosl/slrand/slrand.wgsl +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Original file is in Go package: github.com/cogentcore/core/gpu/gosl/slrand -// See README.md there for documentation. - -// These random number generation (RNG) functions are optimized for -// use on the GPU, with equivalent Go versions available in slrand.go. -// This is using the Philox2x32 counter-based RNG. - -#include "sltype.wgsl" - -// Philox2x32round does one round of updating of the counter. -fn Philox2x32round(counter: su64, key: u32) -> su64 { - let mul = Uint32Mul64(u32(0xD256D193), counter.x); - var ctr: su64; - ctr.x = mul.y ^ key ^ counter.y; - ctr.y = mul.x; - return ctr; -} - -// Philox2x32bumpkey does one round of updating of the key -fn Philox2x32bumpkey(key: u32) -> u32 { - return key + u32(0x9E3779B9); -} - -// Philox2x32 implements the stateless counter-based RNG algorithm -// returning a random number as two uint32 values, given a -// counter and key input that determine the result. -// The input counter is not modified. -fn Philox2x32(counter: su64, key: u32) -> vec2 { - // this is an unrolled loop of 10 updates based on initial counter and key, - // which produces the random deviation deterministically based on these inputs. - var ctr = Philox2x32round(counter, key); // 1 - var ky = Philox2x32bumpkey(key); - ctr = Philox2x32round(ctr, ky); // 2 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 3 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 4 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 5 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 6 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 7 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 8 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 9 - ky = Philox2x32bumpkey(ky); - - return Philox2x32round(ctr, ky); // 10 -} - -//////////////////////////////////////////////////////////// -// Methods below provide a standard interface with more -// readable names, mapping onto the Go rand methods. -// -// They assume a global shared counter, which is then -// incremented by a function index, defined for each function -// consuming random numbers that _could_ be called within a parallel -// processing loop. At the end of the loop, the global counter should -// be incremented by the total possible number of such functions. -// This results in fully resproducible results, invariant to -// specific processing order, and invariant to whether any one function -// actually calls the random number generator. 
- -// RandUint32Vec2 returns two uniformly distributed 32 unsigned integers, -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandUint32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - return Philox2x32(Uint64Add32(counter, funcIndex), key); -} - -// RandUint32 returns a uniformly distributed 32 unsigned integer, -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandUint32(counter: su64, funcIndex: u32, key: u32) -> u32 { - return Philox2x32(Uint64Add32(counter, funcIndex), key).x; -} - -// RandFloat32Vec2 returns two uniformly distributed float32 values in range (0,1), -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key)); -} - -// RandFloat32 returns a uniformly distributed float32 value in range (0,1), -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32(counter: su64, funcIndex: u32, key: u32) -> f32 { - return Uint32ToFloat32(RandUint32(counter, funcIndex, key)); -} - -// RandFloat32Range11Vec2 returns two uniformly distributed float32 values in range [-1,1], -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32Range11Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key)); -} - -// RandFloat32Range11 returns a uniformly distributed float32 value in range [-1,1], -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32Range11(counter: su64, funcIndex: u32, key: u32) -> f32 { - return Uint32ToFloat32Range11(RandUint32(counter, funcIndex, key)); -} - -// RandBoolP returns a bool true value with probability p -// based on given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandBoolP(counter: su64, funcIndex: u32, key: u32, p: f32) -> bool { - return (RandFloat32(counter, funcIndex, key) < p); -} - -fn sincospi(x: f32) -> vec2 { - let PIf = 3.1415926535897932; - var r: vec2; - r.x = cos(PIf*x); - r.y = sin(PIf*x); - return r; -} - -// RandFloat32NormVec2 returns two random float32 numbers -// distributed according to the normal, Gaussian distribution -// with zero mean and unit variance. -// This is done very efficiently using the Box-Muller algorithm -// that consumes two random 32 bit uint values. -// Uses given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. 
-fn RandFloat32NormVec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - let ur = RandUint32Vec2(counter, funcIndex, key); - var f = sincospi(Uint32ToFloat32Range11(ur.x)); - let r = sqrt(-2.0 * log(Uint32ToFloat32(ur.y))); // guaranteed to avoid 0. - return f * r; -} - -// RandFloat32Norm returns a random float32 number -// distributed according to the normal, Gaussian distribution -// with zero mean and unit variance. -// Uses given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandFloat32Norm(counter: su64, funcIndex: u32, key: u32) -> f32 { - return RandFloat32Vec2(counter, funcIndex, key).x; -} - -// RandUint32N returns a uint32 in the range [0,N). -// Uses given global shared counter, function index offset from that -// counter for this specific random number call, and key as unique -// index of the item being processed. -fn RandUint32N(counter: su64, funcIndex: u32, key: u32, n: u32) -> u32 { - let v = RandFloat32(counter, funcIndex, key); - return u32(v * f32(n)); -} - -// Counter is used for storing the random counter using aligned 16 byte -// storage, with convenience functions for typical use cases. -// It retains a copy of the last Seed value, which is applied to -// the Hi uint32 value. -struct RandCounter { - Counter: su64, - HiSeed: u32, - pad: u32, -} - -// Reset resets counter to last set Seed state. -fn RandCounter_Reset(ct: ptr) { - (*ct).Counter.x = u32(0); - (*ct).Counter.y = (*ct).HiSeed; -} - -// Seed sets the Hi uint32 value from given seed, saving it in Seed field. -// Each increment in seed generates a unique sequence of over 4 billion numbers, -// so it is reasonable to just use incremental values there, but more widely -// spaced numbers will result in longer unique sequences. -// Resets Lo to 0. -// This same seed will be restored during Reset -fn RandCounter_Seed(ct: ptr, seed: u32) { - (*ct).HiSeed = seed; - RandCounter_Reset(ct); -} - -// Add increments the counter by given amount. -// Call this after completing a pass of computation -// where the value passed here is the max of funcIndex+1 -// used for any possible random calls during that pass. -fn RandCounter_Add(ct: ptr, inc: u32) { - (*ct).Counter = Uint64Add32((*ct).Counter, inc); -} diff --git a/gpu/gosl/slrand/slrand_test.go b/gpu/gosl/slrand/slrand_test.go deleted file mode 100644 index 82c3d976af..0000000000 --- a/gpu/gosl/slrand/slrand_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package slrand - -import ( - "fmt" - "testing" - - "cogentcore.org/core/gpu/gosl/sltype" - "github.com/stretchr/testify/assert" -) - -// Known Answer Test for values from the DEShawREsearch reference impl -func TestKAT(t *testing.T) { - kats := []struct { - ctr sltype.Uint32Vec2 - key uint32 - res sltype.Uint32Vec2 - }{{sltype.Uint32Vec2{0, 0}, 0, sltype.Uint32Vec2{0xff1dae59, 0x6cd10df2}}, - {sltype.Uint32Vec2{0xffffffff, 0xffffffff}, 0xffffffff, sltype.Uint32Vec2{0x2c3f628b, 0xab4fd7ad}}, - {sltype.Uint32Vec2{0x243f6a88, 0x85a308d3}, 0x13198a2e, sltype.Uint32Vec2{0xdd7ce038, 0xf62a4c12}}} - - for _, tv := range kats { - r := Philox2x32(sltype.Uint64FromLoHi(tv.ctr), tv.key) - if r != tv.res { - fmt.Printf("ctr: %v key: %d != result: %v -- got: %v\n", tv.ctr, tv.key, tv.res, r) - } - } -} - -func TestRand(t *testing.T) { - trgs := [][]float32{ - {0.9965466, -0.84816515, 0.10381041}, - {0.86274576, -0.25368667, 0.7390654}, - {0.018057441, -0.5596625, 0.87044024}, - {0.010364834, 0.38940117, 0.4972646}, - {0.75196105, 0.57544005, 0.37224847}, - {0.23327535, 0.4237375, 0.19016998}, - {0.50003797, 0.82759297, 0.6614841}, - {0.6322405, -0.21457514, 0.17761084}, - {0.59605914, 0.9313429, 0.257}, - {0.57019144, -0.32633832, 0.9563069}, - } - - var counter uint64 - for i := uint32(0); i < 10; i++ { - f, f11, fn := Float32(counter, i, 0), Float32Range11(counter, i, 1), Float32Norm(counter, i, 2) - // fmt.Printf("{%g, %g, %g},\n", f, f11, fn) - assert.Equal(t, trgs[i][0], f) - assert.Equal(t, trgs[i][1], f11) - assert.Equal(t, trgs[i][2], fn) - } -} - -func TestIntn(t *testing.T) { - var counter uint64 - n := uint32(20) - for i := uint32(0); i < 1000; i++ { - r := Uint32N(counter, i, 0, n) - if r >= n { - t.Errorf("r >= n: %d\n", r) - } - // fmt.Printf("%d\t%d\n", i, r) - } -} diff --git a/gpu/gosl/sltype/README.md b/gpu/gosl/sltype/README.md deleted file mode 100644 index 70f7635a23..0000000000 --- a/gpu/gosl/sltype/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# sltype - -Package `sltype` provides type definitions for standard WGSL types, using aliases to the equivalent [math32](https://cogentcore.org/core/math32) types where possible, but including other type names not defined there. - -These types will be converted to their equivalent WGSL types automatically by gosl, as will the corresponding `math32` type names. - diff --git a/gpu/gosl/sltype/float.go b/gpu/gosl/sltype/float.go deleted file mode 100644 index e4697ca2b1..0000000000 --- a/gpu/gosl/sltype/float.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2022, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sltype - -import "cogentcore.org/core/math32" - -// Float32Vec2 is a length 2 vector of float32 -type Float32Vec2 = math32.Vector2 - -// Float32Vec3 is a length 3 vector of float32 -type Float32Vec3 = math32.Vector3 - -// Float32Vec4 is a length 4 vector of float32 -type Float32Vec4 = math32.Vector4 diff --git a/gpu/gosl/sltype/int.go b/gpu/gosl/sltype/int.go deleted file mode 100644 index 1e45a558d3..0000000000 --- a/gpu/gosl/sltype/int.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2022, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package sltype - -import "cogentcore.org/core/math32" - -// Int32Vec2 is a length 2 vector of int32 -type Int32Vec2 = math32.Vector2i - -// Int32Vec3 is a length 3 vector of int32 -type IntVec3 = math32.Vector3i - -// Int32Vec4 is a length 4 vector of int32 -type Int32Vec4 struct { - X int32 - Y int32 - Z int32 - W int32 -} - -//////////////////////////////////////// -// Unsigned - -// Uint32Vec2 is a length 2 vector of uint32 -type Uint32Vec2 struct { - X uint32 - Y uint32 -} - -// Uint32Vec3 is a length 3 vector of uint32 -type Uint32Vec3 struct { - X uint32 - Y uint32 - Z uint32 -} - -// Uint32Vec4 is a length 4 vector of uint32 -type Uint32Vec4 struct { - X uint32 - Y uint32 - Z uint32 - W uint32 -} - -func (u *Uint32Vec4) SetFromVec2(u2 Uint32Vec2) { - u.X = u2.X - u.Y = u2.Y - u.Z = 0 - u.W = 1 -} diff --git a/gpu/gosl/sltype/sltype.go b/gpu/gosl/sltype/sltype.go deleted file mode 100644 index fcd3c45191..0000000000 --- a/gpu/gosl/sltype/sltype.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sltype - -import ( - "math" -) - -// Uint32Mul64 multiplies two uint32 numbers into a uint64. -func Uint32Mul64(a, b uint32) uint64 { - return uint64(a) * uint64(b) -} - -// Uint64ToLoHi splits a uint64 number into lo and hi uint32 components. -func Uint64ToLoHi(a uint64) Uint32Vec2 { - var r Uint32Vec2 - r.Y = uint32(a >> 32) - r.X = uint32(a) - return r -} - -// Uint64FromLoHi combines lo and hi uint32 components into a uint64 value. -func Uint64FromLoHi(a Uint32Vec2) uint64 { - return uint64(a.X) + uint64(a.Y)<<32 -} - -// Uint64Add32 adds given uint32 number to given uint64. -func Uint64Add32(a uint64, b uint32) uint64 { - return a + uint64(b) -} - -// Uint64Incr returns increment of the given uint64. -func Uint64Incr(a uint64) uint64 { - return a + 1 -} - -// Uint32ToFloat32 converts a uint32 integer into a float32 -// in the (0,1) interval (i.e., exclusive of 1). -// This differs from the Go standard by excluding 0, which is handy for passing -// directly to Log function, and from the reference Philox code by excluding 1 -// which is in the Go standard and most other standard RNGs. -func Uint32ToFloat32(val uint32) float32 { - const factor = float32(1.) / (float32(0xffffffff) + float32(1.)) - const halffactor = float32(0.5) * factor - f := float32(val)*factor + halffactor - if f == 1 { // exclude 1 - return math.Float32frombits(0x3F7FFFFF) - } - return f -} - -// Uint32ToFloat32Vec2 converts two uint32 bit integers -// into two corresponding 32 bit f32 values -// in the (0,1) interval (i.e., exclusive of 1). -func Uint32ToFloat32Vec2(val Uint32Vec2) Float32Vec2 { - var r Float32Vec2 - r.X = Uint32ToFloat32(val.X) - r.Y = Uint32ToFloat32(val.Y) - return r -} - -// Uint32ToFloat32Range11 converts a uint32 integer into a float32 -// in the [-1..1] interval (inclusive of -1 and 1, never identically == 0). -func Uint32ToFloat32Range11(val uint32) float32 { - const factor = float32(1.) 
/ (float32(0x7fffffff) + float32(1.)) - const halffactor = float32(0.5) * factor - return (float32(int32(val))*factor + halffactor) -} - -// Uint32ToFloat32Range11Vec2 converts two uint32 integers into two float32 -// in the [-1,1] interval (inclusive of -1 and 1, never identically == 0) -func Uint32ToFloat32Range11Vec2(val Uint32Vec2) Float32Vec2 { - var r Float32Vec2 - r.X = Uint32ToFloat32Range11(val.X) - r.Y = Uint32ToFloat32Range11(val.Y) - return r -} diff --git a/gpu/gosl/sltype/sltype.wgsl b/gpu/gosl/sltype/sltype.wgsl deleted file mode 100644 index e3ffe9e8e6..0000000000 --- a/gpu/gosl/sltype/sltype.wgsl +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Original file is in Go package: github.com/cogentcore/core/gpu/gosl/sltype -// See README.md there for documentation. - -// This file emulates uint64 (u64) using 2 uint32 integers. -// and defines conversions between uint and float. - -// define a u64 type as an alias. -// if / when u64 actually happens, will make it easier to update. -alias su64 = vec2; - -// Uint32Mul64 multiplies two uint32 numbers into a uint64 (using vec2). -fn Uint32Mul64(a: u32, b: u32) -> su64 { - let LOMASK = (((u32(1))<<16)-1); - var r: su64; - r.x = a * b; /* full low multiply */ - let ahi = a >> 16; - let alo = a & LOMASK; - let bhi = b >> 16; - let blo = b & LOMASK; - - let ahbl = ahi * blo; - let albh = alo * bhi; - - let ahbl_albh = ((ahbl&LOMASK) + (albh&LOMASK)); - var hit = ahi*bhi + (ahbl>>16) + (albh>>16); - hit += ahbl_albh >> 16; /* carry from the sum of lo(ahbl) + lo(albh) ) */ - /* carry from the sum with alo*blo */ - if ((r.x >> u32(16)) < (ahbl_albh&LOMASK)) { - hit += u32(1); - } - r.y = hit; - return r; -} - -/* -// Uint32Mul64 multiplies two uint32 numbers into a uint64 (using su64). -fn Uint32Mul64(a: u32, b: u32) -> su64 { - return su64(a) * su64(b); -} -*/ - - -// Uint64Add32 adds given uint32 number to given uint64 (using vec2). -fn Uint64Add32(a: su64, b: u32) -> su64 { - if (b == 0) { - return a; - } - var s = a; - if (s.x > u32(0xffffffff) - b) { - s.y++; - s.x = (b - 1) - (u32(0xffffffff) - s.x); - } else { - s.x += b; - } - return s; -} - -// Uint64Incr returns increment of the given uint64 (using vec2). -fn Uint64Incr(a: su64) -> su64 { - var s = a; - if(s.x == 0xffffffff) { - s.y++; - s.x = u32(0); - } else { - s.x++; - } - return s; -} - -// Uint32ToFloat32 converts a uint32 integer into a float32 -// in the (0,1) interval (i.e., exclusive of 1). -// This differs from the Go standard by excluding 0, which is handy for passing -// directly to Log function, and from the reference Philox code by excluding 1 -// which is in the Go standard and most other standard RNGs. -fn Uint32ToFloat32(val: u32) -> f32 { - let factor = f32(1.0) / (f32(u32(0xffffffff)) + f32(1.0)); - let halffactor = f32(0.5) * factor; - var f = f32(val) * factor + halffactor; - if (f == 1.0) { // exclude 1 - return bitcast(0x3F7FFFFF); - } - return f; -} - -// note: there is no overloading of user-defined functions -// https://github.com/gpuweb/gpuweb/issues/876 - -// Uint32ToFloat32Vec2 converts two uint 32 bit integers -// into two corresponding 32 bit f32 values -// in the (0,1) interval (i.e., exclusive of 1). 
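The conversion above is the same mapping as the Go Uint32ToFloat32 in sltype.go: scale by 1/2^32, add half a step so 0 is excluded, and clamp away an exact 1. A standalone sketch (local names, not the package API) that reproduces the known-answer values from the test file below:

```go
package main

import (
	"fmt"
	"math"
)

// uint32ToFloat32 maps a uint32 into the open interval (0,1):
// factor is 1/2^32, the half-factor offset keeps 0 out of the range,
// and the explicit check keeps an exact 1 out as well.
func uint32ToFloat32(val uint32) float32 {
	const factor = float32(1.0 / 4294967296.0) // 1 / 2^32
	const halffactor = 0.5 * factor
	f := float32(val)*factor + halffactor
	if f == 1 { // exclude 1
		return math.Float32frombits(0x3F7FFFFF) // largest float32 below 1
	}
	return f
}

func main() {
	fmt.Println(uint32ToFloat32(0))              // ~1.1641532e-10, never exactly 0
	fmt.Println(uint32ToFloat32(math.MaxUint32)) // 0.99999994, never exactly 1
}
```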
-fn Uint32ToFloat32Vec2(val: vec2) -> vec2 { - var r: vec2; - r.x = Uint32ToFloat32(val.x); - r.y = Uint32ToFloat32(val.y); - return r; -} - -// Uint32ToFloat32Range11 converts a uint32 integer into a float32 -// in the [-1..1] interval (inclusive of -1 and 1, never identically == 0). -fn Uint32ToFloat32Range11(val: u32) -> f32 { - let factor = f32(1.0) / (f32(i32(0x7fffffff)) + f32(1.0)); - let halffactor = f32(0.5) * factor; - return (f32(val) * factor + halffactor); -} - -// Uint32ToFloat32Range11Vec2 converts two uint32 integers into two float32 -// in the [-1,1] interval (inclusive of -1 and 1, never identically == 0). -fn Uint32ToFloat32Range11Vec2(val: vec2) -> vec2 { - var r: vec2; - r.x = Uint32ToFloat32Range11(val.x); - r.y = Uint32ToFloat32Range11(val.y); - return r; -} - - diff --git a/gpu/gosl/sltype/sltype_test.go b/gpu/gosl/sltype/sltype_test.go deleted file mode 100644 index d19da2052d..0000000000 --- a/gpu/gosl/sltype/sltype_test.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sltype - -import ( - "fmt" - "math" - "testing" -) - -// Float01 Known Answer Test for float conversion values from the DEShawREsearch reference impl -func TestFloat01KAT(t *testing.T) { - minint := math.MinInt32 - kats := []struct { - base uint32 - add uint32 - res float32 - }{{0, 0, 1.16415321826934814453e-10}, - {uint32(minint), 0, 0.5}, - {math.MaxInt32, 0, 0.5}, - {math.MaxUint32, 0, 0.99999994}, - } - - for _, tv := range kats { - r := Uint32ToFloat32(tv.base + tv.add) - if r != tv.res { - fmt.Printf("base: %x add: %x != result: %g -- got: %g\n", tv.base, tv.add, tv.res, r) - } - } -} - -// Float11 Known Answer Test for float conversion values from the DEShawREsearch reference impl -func TestFloat11KAT(t *testing.T) { - minint := math.MinInt32 - kats := []struct { - base uint32 - add uint32 - res float32 - }{{0, 0, 2.32830643653869628906e-10}, - {uint32(minint), 0, -1.0}, - {math.MaxInt32, 0, 1.0}, - {math.MaxUint32, 0, -2.32830643653869628906e-10}, - } - - for _, tv := range kats { - r := Uint32ToFloat32Range11(tv.base + tv.add) - if r != tv.res { - fmt.Printf("base: %x add: %x != result: %g -- got: %g\n", tv.base, tv.add, tv.res, r) - } - } -} - -func TestCounter(t *testing.T) { - counter := Uint64FromLoHi(Uint32Vec2{X: 0xfffffffe, Y: 0}) - ctr := counter - ctr = Uint64Add32(ctr, 4) - ctrlh := Uint64ToLoHi(ctr) - if ctrlh.X != 2 && ctrlh.Y != 1 { - t.Errorf("Should be 2, 1: %v\n", ctrlh) - } - ctr = counter - ctr = Uint64Add32(ctr, 1) - ctrlh = Uint64ToLoHi(ctr) - if ctrlh.X != 0xffffffff && ctrlh.Y == 0 { - t.Errorf("Should be 0, 0xfffffffe: %v\n", ctrlh) - } - ctr = counter - ctr = Uint64Add32(ctr, 1) - ctrlh = Uint64ToLoHi(ctr) - if ctrlh.X != 0 && ctrlh.Y == 1 { - t.Errorf("Should be 0, 1: %v\n", ctrlh) - } - ctr = counter - ctr = Uint64Incr(ctr) - ctr = Uint64Incr(ctr) - ctrlh = Uint64ToLoHi(ctr) - if ctrlh.X != 0 && ctrlh.Y != 1 { - t.Errorf("Should be 0, 1: %v\n", ctrlh) - } -} diff --git a/gpu/gosl/testdata/basic.go b/gpu/gosl/testdata/basic.go deleted file mode 100644 index 72a3610c95..0000000000 --- a/gpu/gosl/testdata/basic.go +++ /dev/null @@ -1,199 +0,0 @@ -package test - -import ( - "math" - - "cogentcore.org/core/math32" - "cogentcore.org/core/vgpu/gosl/slbool" -) - -// note: this code is included in the go pre-processing output but -// then removed from the final wgsl output. 
-// Use when you need different versions of the same function for CPU vs. GPU - -// MyTrickyFun this is the CPU version of the tricky function -func MyTrickyFun(x float32) float32 { - return 10 // ok actually not tricky here, but whatever -} - -//gosl:wgsl basic - -// // note: here is the wgsl version, only included in wgsl - -// // MyTrickyFun this is the GPU version of the tricky function -// fn MyTrickyFun(x: f32) -> f32 { -// return 16.0; // ok actually not tricky here, but whatever -// } - -//gosl:end basic - -//gosl:start basic - -// FastExp is a quartic spline approximation to the Exp function, by N.N. Schraudolph -// It does not have any of the sanity checking of a standard method -- returns -// nonsense when arg is out of range. Runs in 2.23ns vs. 6.3ns for 64bit which is faster -// than math32.Exp actually. -func FastExp(x float32) float32 { - if x <= -88.76731 { // this doesn't add anything and -exp is main use-case anyway - return 0 - } - i := int32(12102203*x) + int32(127)*(int32(1)<<23) - m := i >> 7 & 0xFFFF // copy mantissa - i += (((((((((((3537 * m) >> 16) + 13668) * m) >> 18) + 15817) * m) >> 14) - 80470) * m) >> 11) - return math.Float32frombits(uint32(i)) -} - -// NeuronFlags are bit-flags encoding relevant binary state for neurons -type NeuronFlags int32 - -// The neuron flags -const ( - // NeuronOff flag indicates that this neuron has been turned off (i.e., lesioned) - NeuronOff NeuronFlags = 0x01 - - // NeuronHasExt means the neuron has external input in its Ext field - NeuronHasExt NeuronFlags = 0x02 // note: 1<<2 does NOT work - - // NeuronHasTarg means the neuron has external target input in its Target field - NeuronHasTarg NeuronFlags = 0x04 - - // NeuronHasCmpr means the neuron has external comparison input in its Target field -- used for computing - // comparison statistics but does not drive neural activity ever - NeuronHasCmpr NeuronFlags = 0x08 -) - -// Modes are evaluation modes (Training, Testing, etc) -type Modes int32 - -// The evaluation modes -const ( - NoEvalMode Modes = iota - - // AllModes indicates that the log should occur over all modes present in other items. 
- AllModes - - // Train is this a training mode for the env - Train - - // Test is this a test mode for the env - Test -) - -// DataStruct has the test data -type DataStruct struct { - - // raw value - Raw float32 - - // integrated value - Integ float32 - - // exp of integ - Exp float32 - - pad float32 -} - -// SubParamStruct has the test sub-params -type SubParamStruct struct { - A, B, C, D float32 -} - -func (sp *SubParamStruct) Sum() float32 { - return sp.A + sp.B + sp.C + sp.D -} - -func (sp *SubParamStruct) SumPlus(extra float32) float32 { - return sp.Sum() + extra -} - -// ParamStruct has the test params -type ParamStruct struct { - - // rate constant in msec - Tau float32 - - // 1/Tau - Dt float32 - Option slbool.Bool // note: standard bool doesn't work - - pad float32 // comment this out to trigger alignment warning - - // extra parameters - Subs SubParamStruct -} - -func (ps *ParamStruct) IntegFromRaw(ds *DataStruct) float32 { - // note: the following are just to test basic control structures - newVal := ps.Dt * (ds.Raw - ds.Integ) - if newVal < -10 || ps.Option.IsTrue() { - newVal = -10 - } - ds.Integ += newVal - ds.Exp = math32.Exp(-ds.Integ) - var a float32 - ps.AnotherMeth(ds, &a) - return ds.Exp -} - -// AnotherMeth does more computation -func (ps *ParamStruct) AnotherMeth(ds *DataStruct, ptrarg *float32) { - for i := 0; i < 10; i++ { - ds.Integ *= 0.99 - } - var flag NeuronFlags - flag &^= NeuronHasExt // clear flag -- op doesn't exist in C - - mode := Test - switch mode { // note: no fallthrough! - case Test: - ab := float32(42) - ds.Exp /= ab - case Train: - ab := float32(.5) - ds.Exp *= ab - default: - ab := float32(1) - ds.Exp *= ab - } - - var a, b float32 - b = 42 - a = ps.Subs.Sum() - ds.Exp = ps.Subs.SumPlus(b) - ds.Integ = a - - *ptrarg = -1 -} - -//gosl:end basic - -// note: only core compute code needs to be in shader -- all init is done CPU-side - -func (ps *ParamStruct) Defaults() { - ps.Tau = 5 - ps.Update() -} - -func (ps *ParamStruct) Update() { - ps.Dt = 1.0 / ps.Tau -} - -//gosl:wgsl basic -/* -@group(0) @binding(0) -var Params: array; - -@group(0) @binding(1) -var Data: array; - -@compute -@workgroup_size(64) -fn main(@builtin(global_invocation_id) idx: vec3) { - var pars = Params[0]; - var data = Data[idx.x]; - ParamStruct_IntegFromRaw(&pars, &data); - Data[idx.x] = data; -} -*/ -//gosl:end basic diff --git a/gpu/gosl/testdata/basic.golden b/gpu/gosl/testdata/basic.golden deleted file mode 100644 index feabd4e16a..0000000000 --- a/gpu/gosl/testdata/basic.golden +++ /dev/null @@ -1,166 +0,0 @@ - - -// note: here is the wgsl version, only included in wgsl - -// MyTrickyFun this is the GPU version of the tricky function -fn MyTrickyFun(x: f32) -> f32 { - return 16.0; // ok actually not tricky here, but whatever -} - - -// FastExp is a quartic spline approximation to the Exp function, by N.N. Schraudolph -// It does not have any of the sanity checking of a standard method -- returns -// nonsense when arg is out of range. Runs in 2.23ns vs. 6.3ns for 64bit which is faster -// than exp actually. 
-fn FastExp(x: f32) -> f32 { - if (x <= -88.76731) { // this doesn't add anything and -exp is main use-case anyway - return f32(0); - } - var i = i32(12102203*x) + i32(127)*(i32(1)<<23); - var m = i >> 7 & 0xFFFF; // copy mantissa - i += (((((((((((3537 * m) >> 16) + 13668) * m) >> 18) + 15817) * m) >> 14) - 80470) * m) >> 11); - return bitcast(u32(i)); -} - -// NeuronFlags are bit-flags encoding relevant binary state for neurons -alias NeuronFlags = i32; - -// The neuron flags - -// NeuronOff flag indicates that this neuron has been turned off (i.e., lesioned) -const NeuronOff: NeuronFlags = 0x01; - -// NeuronHasExt means the neuron has external input in its Ext field -const NeuronHasExt: NeuronFlags = 0x02; // note: 1<<2 does NOT work - -// NeuronHasTarg means the neuron has external target input in its Target field -const NeuronHasTarg: NeuronFlags = 0x04; - -// NeuronHasCmpr means the neuron has external comparison input in its Target field -- used for computing -// comparison statistics but does not drive neural activity ever -const NeuronHasCmpr: NeuronFlags = 0x08; - -// Modes are evaluation modes (Training, Testing, etc) -alias Modes = i32; - -// The evaluation modes - -const NoEvalMode: Modes = 0; - -// AllModes indicates that the log should occur over all modes present in other items. -const AllModes: Modes = 1; - -// Train is this a training mode for the env -const Train: Modes = 2; - -// Test is this a test mode for the env -const Test: Modes = 3; - -// DataStruct has the test data -struct DataStruct { - - // raw value - Raw: f32, - - // integrated value - Integ: f32, - - // exp of integ - Exp: f32, - - pad: f32, -} - -// SubParamStruct has the test sub-params -struct SubParamStruct { - A: f32, - B: f32, - C: f32, - D: f32, -} - -fn SubParamStruct_Sum(sp: ptr) -> f32 { - return (*sp).A + (*sp).B + (*sp).C + (*sp).D; -} - -fn SubParamStruct_SumPlus(sp: ptr, extra: f32) -> f32 { - return SubParamStruct_Sum(sp) + extra; -} - -// ParamStruct has the test params -struct ParamStruct { - - // rate constant in msec - Tau: f32, - - // 1/Tau - Dt: f32, - Option: i32, // note: standard bool doesn't work - - pad: f32, // comment this out to trigger alignment warning - - // extra parameters - Subs: SubParamStruct, -} - -fn ParamStruct_IntegFromRaw(ps: ptr, ds: ptr) -> f32 { - // note: the following are just to test basic control structures - var newVal = (*ps).Dt * ((*ds).Raw - (*ds).Integ); - if (newVal < -10 || (*ps).Option == 1) { - newVal = f32(-10); - } - (*ds).Integ += newVal; - (*ds).Exp = exp(-(*ds).Integ); - var a: f32; - ParamStruct_AnotherMeth(ps, ds, &a); - return (*ds).Exp; -} - -// AnotherMeth does more computation -fn ParamStruct_AnotherMeth(ps: ptr, ds: ptr, ptrarg: ptr) { - for (var i = 0; i < 10; i++) { - (*ds).Integ *= f32(0.99); - } - var flag: NeuronFlags; - flag &= ~NeuronHasExt; // clear flag -- op doesn't exist in C - - var mode = Test; - switch (mode) { // note: no fallthrough! 
- case Test: { - var ab = f32(42); - (*ds).Exp /= ab; - } - case Train: { - var ab = f32(.5); - (*ds).Exp *= ab; - } - default: { - var ab = f32(1); - (*ds).Exp *= ab; - } - } - - var a: f32; - var b: f32; - b = f32(42); - a = SubParamStruct_Sum(&(*ps).Subs); - (*ds).Exp = SubParamStruct_SumPlus(&(*ps).Subs, b); - (*ds).Integ = a; - - *ptrarg = f32(-1); -} - -@group(0) @binding(0) -var Params: array; - -@group(0) @binding(1) -var Data: array; - -@compute -@workgroup_size(64) -fn main(@builtin(global_invocation_id) idx: vec3) { - var pars = Params[0]; - var data = Data[idx.x]; - ParamStruct_IntegFromRaw(&pars, &data); - Data[idx.x] = data; -} diff --git a/gpu/gosl/threading/threading.go b/gpu/gosl/threading/threading.go deleted file mode 100644 index c4ce24b443..0000000000 --- a/gpu/gosl/threading/threading.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package threading provides a simple parallel run function. this will be moved elsewhere. -package threading - -import ( - "math" - "sync" -) - -// Maps the given function across the [0, total) range of items, using -// nThreads goroutines. -func ParallelRun(fun func(st, ed int), total int, nThreads int) { - itemsPerThr := int(math.Ceil(float64(total) / float64(nThreads))) - waitGroup := sync.WaitGroup{} - for start := 0; start < total; start += itemsPerThr { - start := start // be extra sure with closure - end := min(start+itemsPerThr, total) - waitGroup.Add(1) // todo: move out of loop - go func() { - fun(start, end) - waitGroup.Done() - }() - } - waitGroup.Wait() -} diff --git a/gpu/value.go b/gpu/value.go index 922f8cc261..fe01ba7db5 100644 --- a/gpu/value.go +++ b/gpu/value.go @@ -30,10 +30,17 @@ type Value struct { // index of this value within the Var list of values Index int - // VarSize is the size of each Var element, which includes any fixed ArrayN + // VarSize is the size of each Var element, which includes any fixed Var.ArrayN // array size specified on the Var. + // The actual buffer size is VarSize * Value.ArrayN (or DynamicN for dynamic). VarSize int + // ArrayN is the actual number of array elements, for Uniform or Storage + // variables without a fixed array size (i.e., the Var ArrayN = 1). + // This is set when the buffer is actually created, based on the data, + // or can be set directly prior to buffer creation. + ArrayN int + // DynamicIndex is the current index into a DynamicOffset variable // to use for the SetBindGroup call. Note that this is an index, // not an offset, so it indexes the DynamicN Vars in the Value, @@ -103,6 +110,16 @@ func MemSizeAlign(size, align int) int { return (nb + 1) * align } +// MemSizeAlignDown returns the size aligned according to align byte increments, +// rounding down, not up. +func MemSizeAlignDown(size, align int) int { + if size%align == 0 { + return size + } + nb := size / align + return nb * align +} + // init initializes value based on variable and index // within list of vals for this var. 
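The deleted threading.ParallelRun above splits [0, total) into contiguous chunks, one goroutine each. A usage sketch with a standalone copy of that pattern (the helper here is a local reimplementation, not an import of the removed package):

```go
package main

import (
	"fmt"
	"sync"
)

// parallelRun maps fun over [0, total) in nThreads contiguous chunks.
func parallelRun(fun func(st, ed int), total, nThreads int) {
	per := (total + nThreads - 1) / nThreads // ceil(total / nThreads)
	var wg sync.WaitGroup
	for st := 0; st < total; st += per {
		ed := min(st+per, total)
		wg.Add(1)
		go func(st, ed int) {
			defer wg.Done()
			fun(st, ed)
		}(st, ed)
	}
	wg.Wait()
}

func main() {
	data := make([]float64, 1000)
	for i := range data {
		data[i] = float64(i)
	}
	parallelRun(func(st, ed int) {
		for i := st; i < ed; i++ {
			data[i] *= data[i] // square each element in place
		}
	}, len(data), 4)
	fmt.Println(data[10]) // 100
}
```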
func (vl *Value) init(vr *Var, dev *Device, idx int) { @@ -112,6 +129,7 @@ func (vl *Value) init(vr *Var, dev *Device, idx int) { vl.Index = idx vl.Name = fmt.Sprintf("%s_%d", vr.Name, vl.Index) vl.VarSize = vr.MemSize() + vl.ArrayN = 1 vl.alignBytes = vr.alignBytes vl.AlignVarSize = MemSizeAlign(vl.VarSize, vl.alignBytes) vl.isDynamic = vl.role == Vertex || vl.role == Index || vr.DynamicOffset @@ -121,6 +139,10 @@ func (vl *Value) init(vr *Var, dev *Device, idx int) { } } +func (vl *Value) String() string { + return fmt.Sprintf("Bytes: 0x%X", vl.MemSize()) +} + // MemSize returns the memory allocation size for this value, in bytes. func (vl *Value) MemSize() int { if vl.Texture != nil { @@ -129,11 +151,12 @@ func (vl *Value) MemSize() int { if vl.isDynamic { return vl.AlignVarSize * vl.dynamicN } - return vl.VarSize + return vl.ArrayN * vl.VarSize } // CreateBuffer creates the GPU buffer for this value if it does not // yet exist or is not the right size. +// For !ReadOnly [Storage] buffers, calls [Value.CreateReadBuffer]. func (vl *Value) CreateBuffer() error { if vl.role == SampledTexture { return nil @@ -151,7 +174,7 @@ func (vl *Value) CreateBuffer() error { buf, err := vl.device.Device.CreateBuffer(&wgpu.BufferDescriptor{ Size: uint64(sz), Label: vl.Name, - Usage: vl.role.BufferUsages(), + Usage: vl.vvar.bufferUsages(), MappedAtCreation: false, }) if errors.Log(err) != nil { @@ -159,6 +182,9 @@ func (vl *Value) CreateBuffer() error { } vl.AllocSize = sz vl.buffer = buf + if vl.role == Storage && !vl.vvar.ReadOnly { + vl.CreateReadBuffer() + } return nil } @@ -214,6 +240,9 @@ func (vl *Value) SetDynamicN(n int) { // SetValueFrom copies given values into value buffer memory, // making the buffer if it has not yet been constructed. +// The actual ArrayN size of Storage or Uniform variables will +// be computed based on the size of the from bytes, relative to +// the variable size. // IMPORTANT: do not use this for dynamic offset Uniform or // Storage variables, as the alignment will not be correct; // See [SetDynamicFromBytes]. @@ -223,6 +252,7 @@ func SetValueFrom[E any](vl *Value, from []E) error { // SetFromBytes copies given bytes into value buffer memory, // making the buffer if it has not yet been constructed. +// For !ReadOnly [Storage] buffers, calls [Value.CreateReadBuffer]. // IMPORTANT: do not use this for dynamic offset Uniform or // Storage variables, as the alignment will not be correct; // See [SetDynamicFromBytes]. 
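The next hunk derives the new Value.ArrayN from the length of the data passed in, requiring it to be a whole multiple of VarSize. A standalone sketch of just that sizing arithmetic (local names; not the gpu package API):

```go
package main

import "fmt"

// arrayN computes the number of array elements for a buffer of nbytes,
// given the per-element varSize, rejecting partial elements.
func arrayN(nbytes, varSize int) (int, error) {
	if nbytes%varSize != 0 {
		return 0, fmt.Errorf("size %d is not an even multiple of the variable size %d", nbytes, varSize)
	}
	return nbytes / varSize, nil
}

func main() {
	n, err := arrayN(160, 16) // e.g., 10 elements of a 16-byte struct
	fmt.Println(n, err)       // 10 <nil>
	_, err = arrayN(100, 16)
	fmt.Println(err) // not an even multiple
}
```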
@@ -232,12 +262,19 @@ func (vl *Value) SetFromBytes(from []byte) error { return errors.Log(err) } nb := len(from) + an := nb / vl.VarSize + aover := nb % vl.VarSize + if aover != 0 { + err := fmt.Errorf("gpu.Value SetFromBytes %s, Size passed: %d is not an even multiple of the variable size: %d", vl.Name, nb, vl.VarSize) + return errors.Log(err) + } if vl.isDynamic { // Vertex, Index at this point - dn := nb / vl.VarSize - vl.SetDynamicN(dn) + vl.SetDynamicN(an) + } else { + vl.ArrayN = an } tb := vl.MemSize() - if nb != tb { + if nb != tb { // this should never happen, but justin case err := fmt.Errorf("gpu.Value SetFromBytes %s, Size passed: %d != Size expected %d", vl.Name, nb, tb) return errors.Log(err) } @@ -247,13 +284,16 @@ func (vl *Value) SetFromBytes(from []byte) error { buf, err := vl.device.Device.CreateBufferInit(&wgpu.BufferInitDescriptor{ Label: vl.Name, Contents: from, - Usage: vl.role.BufferUsages(), + Usage: vl.vvar.bufferUsages(), }) if errors.Log(err) != nil { return err } vl.buffer = buf vl.AllocSize = nb + if vl.role == Storage && !vl.vvar.ReadOnly { + vl.CreateReadBuffer() + } } else { err := vl.device.Queue.WriteBuffer(vl.buffer, 0, from) if errors.Log(err) != nil { @@ -320,7 +360,7 @@ func (vl *Value) WriteDynamicBuffer() error { buf, err := vl.device.Device.CreateBufferInit(&wgpu.BufferInitDescriptor{ Label: vl.Name, Contents: vl.dynamicBuffer, - Usage: vl.role.BufferUsages(), + Usage: vl.vvar.bufferUsages(), }) if errors.Log(err) != nil { return err @@ -406,11 +446,10 @@ func (vl *Value) SetFromTexture(tx *Texture) *Texture { } // CreateReadBuffer creates a read buffer for this value, -// if it does not yet exist or is not the right size. +// for [Storage] values only. Automatically called for !ReadOnly. // Read buffer is needed for reading values back from the GPU. -// Only for Storage role variables. func (vl *Value) CreateReadBuffer() error { - if !(vl.role == Storage || vl.role == StorageTexture) { + if !(vl.role == Storage || vl.role == StorageTexture) || vl.vvar.ReadOnly { return nil } sz := vl.MemSize() diff --git a/gpu/values.go b/gpu/values.go index 695db559ed..193c6da697 100644 --- a/gpu/values.go +++ b/gpu/values.go @@ -147,18 +147,6 @@ func (vs *Values) MemSize() int { return tsz } -// CreateReadBuffers creates read buffers for all values. -func (vs *Values) CreateReadBuffers() error { - var errs []error - for _, vl := range vs.Values { - err := vl.CreateReadBuffer() - if err != nil { - errs = append(errs, err) - } - } - return errors.Join(errs...) -} - // bindGroupEntry returns the BindGroupEntry for Current // value for this variable. func (vs *Values) bindGroupEntry(vr *Var) []wgpu.BindGroupEntry { diff --git a/gpu/var.go b/gpu/var.go index 0845aa28ab..b7a4ef7f6e 100644 --- a/gpu/var.go +++ b/gpu/var.go @@ -35,13 +35,12 @@ type Var struct { // automatically be sent as 4 interleaved Float32Vector4 chuncks. Type Types - // number of elements, which is 1 for a single element, or a constant - // number for a fixed array of elements. For Vertex variables, the - // number is dynamic and does not need to be specified in advance, - // so you can leave it at 1. There can be alignment issues with arrays + // ArrayN is the number of elements in an array, only if there is a + // fixed array size. Otherwise, for single elements or dynamic arrays + // use a value of 1. There can be alignment issues with arrays // so make sure your elemental types are compatible. 
// Note that DynamicOffset variables can have Value buffers with multiple - // instances of the variable (with proper alignment stride), which is + // instances of the variable (with proper alignment stride), // which goes on top of any array value for the variable itself. ArrayN int @@ -87,6 +86,11 @@ type Var struct { // Only for Uniform and Storage variables. DynamicOffset bool + // ReadOnly applies only to [Storage] variables, and indicates that + // they are never read back from the GPU, so the additional staging + // buffers needed to do so are not created for these variables. + ReadOnly bool + // Values is the the array of Values allocated for this variable. // Each value has its own corresponding Buffer or Texture. // The currently-active Value is specified by the Current index, @@ -137,6 +141,9 @@ func (vr *Var) String() string { } } s := fmt.Sprintf("%d:\t%s\t%s\t(size: %d)\tValues: %d", vr.Binding, vr.Name, typ, vr.SizeOf, len(vr.Values.Values)) + if len(vr.Values.Values) == 1 { + s += "\t" + vr.Values.Values[0].String() + } return s } @@ -145,7 +152,6 @@ func (vr *Var) MemSize() int { if vr.ArrayN < 1 { vr.ArrayN = 1 } - // todo: may need to diagnose alignments here.. switch { case vr.Role >= SampledTexture: return 0 @@ -157,7 +163,6 @@ func (vr *Var) MemSize() int { // Release resets the MemPtr for values, resets any self-owned resources (Textures) func (vr *Var) Release() { vr.Values.Release() - // todo: free anything in var } // SetNValues sets specified number of Values for this var. @@ -177,3 +182,17 @@ func (vr *Var) SetCurrentValue(i int) { func (vr *Var) bindGroupEntry() []wgpu.BindGroupEntry { return vr.Values.bindGroupEntry(vr) } + +func (vr *Var) bindingType() wgpu.BufferBindingType { + if vr.Role == Storage && vr.ReadOnly { + return wgpu.BufferBindingTypeReadOnlyStorage + } + return vr.Role.BindingType() +} + +func (vr *Var) bufferUsages() wgpu.BufferUsage { + if vr.Role == Storage && vr.ReadOnly { + return wgpu.BufferUsageStorage | wgpu.BufferUsageCopyDst + } + return vr.Role.BufferUsages() +} diff --git a/gpu/vargroup.go b/gpu/vargroup.go index a676ad6610..6ab44a2c05 100644 --- a/gpu/vargroup.go +++ b/gpu/vargroup.go @@ -166,18 +166,6 @@ func (vg *VarGroup) SetAllCurrentValue(i int) { } } -// CreateReadBuffers creates read buffers for all values. -func (vg *VarGroup) CreateReadBuffers() error { - var errs []error - for _, vr := range vg.Vars { - err := vr.Values.CreateReadBuffers() - if err != nil { - errs = append(errs, err) - } - } - return errors.Join(errs...) -} - // Config must be called after all variables have been added. // Configures binding / location for all vars based on sequential order. // also does validation and returns error message. @@ -277,7 +265,7 @@ func (vg *VarGroup) bindLayout(vs *Vars) (*wgpu.BindGroupLayout, error) { } default: bd.Buffer = wgpu.BufferBindingLayout{ - Type: vr.Role.BindingType(), + Type: vr.bindingType(), HasDynamicOffset: false, MinBindingSize: 0, // 0 is fine } diff --git a/gpu/vars.go b/gpu/vars.go index 9580a95ce3..2e1720b94d 100644 --- a/gpu/vars.go +++ b/gpu/vars.go @@ -169,27 +169,6 @@ func (vs *Vars) SetDynamicIndex(group int, name string, dynamicIndex int) *Var { return vr } -// CreateReadBuffers creates read buffers for all Storage variables. -// This is needed to be able to read values back from GPU (e.g., for Compute). 
-func (vs *Vars) CreateReadBuffers() error { - var errs []error - ns := vs.NGroups() - for gi := vs.StartGroup(); gi < ns; gi++ { - vg := vs.Groups[gi] - if vg == nil { - continue - } - if vg.Role != Storage { - continue - } - err := vg.CreateReadBuffers() - if err != nil { - errs = append(errs, err) - } - } - return errors.Join(errs...) -} - // Config must be called after all variables have been added. // Configures all Groups and also does validation, returning error // does DescLayout too, so all ready for Pipeline config. diff --git a/math32/fastexp.go b/math32/fastexp.go index 8bd1f3046b..2af370458d 100644 --- a/math32/fastexp.go +++ b/math32/fastexp.go @@ -65,7 +65,7 @@ func FastExp3(x float32) float32 { } */ -//gosl:start fastexp +//gosl:start // FastExp is a quartic spline approximation to the Exp function, by N.N. Schraudolph // It does not have any of the sanity checking of a standard method -- returns @@ -76,9 +76,9 @@ func FastExp(x float32) float32 { return 0.0 } i := int32(12102203*x) + int32(127)*(int32(1)<<23) - m := i >> 7 & 0xFFFF // copy mantissa + m := (i >> 7) & 0xFFFF // copy mantissa i += (((((((((((3537 * m) >> 16) + 13668) * m) >> 18) + 15817) * m) >> 14) - 80470) * m) >> 11) return math.Float32frombits(uint32(i)) } -//gosl:end fastexp +//gosl:end diff --git a/math32/math.go b/math32/math.go index 0f4413e3f8..b67e02150e 100644 --- a/math32/math.go +++ b/math32/math.go @@ -15,6 +15,7 @@ package math32 //go:generate core generate import ( + "cmp" "math" "strconv" @@ -783,18 +784,7 @@ func Yn(n int, x float32) float32 { // Special additions to math. functions // Clamp clamps x to the provided closed interval [a, b] -func Clamp(x, a, b float32) float32 { - if x < a { - return a - } - if x > b { - return b - } - return x -} - -// ClampInt clamps x to the provided closed interval [a, b] -func ClampInt(x, a, b int) int { +func Clamp[T cmp.Ordered](x, a, b T) T { if x < a { return a } diff --git a/math32/minmax/avgmax.go b/math32/minmax/avgmax.go index 7877ccd4d2..72fb1dbe7a 100644 --- a/math32/minmax/avgmax.go +++ b/math32/minmax/avgmax.go @@ -4,9 +4,12 @@ package minmax -import "fmt" +import ( + "fmt" + "math" +) -//gosl:start minmax +//gosl:start const ( MaxFloat32 float32 = 3.402823466e+38 @@ -69,7 +72,7 @@ func (am *AvgMax32) CalcAvg() { } } -//gosl:end minmax +//gosl:end func (am *AvgMax32) String() string { return fmt.Sprintf("{Avg: %g, Max: %g, Sum: %g, MaxIndex: %d, N: %d}", am.Avg, am.Max, am.Sum, am.MaxIndex, am.N) @@ -114,7 +117,7 @@ func (am *AvgMax64) Init() { am.Avg = 0 am.Sum = 0 am.N = 0 - am.Max = -MaxFloat64 + am.Max = math.Inf(-1) am.MaxIndex = -1 } diff --git a/math32/minmax/minmax32.go b/math32/minmax/minmax32.go index 2f5d9fff36..0b9207cd5e 100644 --- a/math32/minmax/minmax32.go +++ b/math32/minmax/minmax32.go @@ -4,9 +4,13 @@ package minmax -import "fmt" +import ( + "fmt" -//gosl:start minmax + "cogentcore.org/core/math32" +) + +//gosl:start // F32 represents a min / max range for float32 values. // Supports clipping, renormalizing, etc @@ -23,11 +27,11 @@ func (mr *F32) Set(mn, mx float32) { mr.Max = mx } -// SetInfinity sets the Min to +MaxFloat, Max to -MaxFloat -- suitable for -// iteratively calling Fit*InRange +// SetInfinity sets the Min to +Inf, Max to -Inf -- suitable for +// iteratively calling Fit*InRange. See also Sanitize when done. 
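The math32 hunk above replaces the separate Clamp and ClampInt functions with a single generic Clamp over cmp.Ordered. A hedged usage sketch (reimplemented locally so it runs standalone):

```go
package main

import (
	"cmp"
	"fmt"
)

// clamp restricts x to the closed interval [a, b] for any ordered type,
// covering both the old float32 Clamp and the old ClampInt.
func clamp[T cmp.Ordered](x, a, b T) T {
	if x < a {
		return a
	}
	if x > b {
		return b
	}
	return x
}

func main() {
	fmt.Println(clamp(float32(1.5), 0, 1)) // 1
	fmt.Println(clamp(-3, 0, 10))          // 0 (previously ClampInt)
	fmt.Println(clamp("m", "a", "f"))      // f: any cmp.Ordered type works
}
```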
func (mr *F32) SetInfinity() { - mr.Min = MaxFloat32 - mr.Max = -MaxFloat32 + mr.Min = math32.Inf(1) + mr.Max = math32.Inf(-1) } // IsValid returns true if Min <= Max @@ -87,7 +91,7 @@ func (mr *F32) FitValInRange(val float32) bool { // NormVal normalizes value to 0-1 unit range relative to current Min / Max range // Clips the value within Min-Max range first. func (mr *F32) NormValue(val float32) float32 { - return (mr.ClipValue(val) - mr.Min) * mr.Scale() + return (mr.ClampValue(val) - mr.Min) * mr.Scale() } // ProjVal projects a 0-1 normalized unit value into current Min / Max range (inverse of NormVal) @@ -95,9 +99,9 @@ func (mr *F32) ProjValue(val float32) float32 { return mr.Min + (val * mr.Range()) } -// ClipVal clips given value within Min / Max range -// Note: a NaN will remain as a NaN -func (mr *F32) ClipValue(val float32) float32 { +// ClampValue clamps given value within Min / Max range +// Note: a NaN will remain as a NaN. +func (mr *F32) ClampValue(val float32) float32 { if val < mr.Min { return mr.Min } @@ -119,7 +123,7 @@ func (mr *F32) ClipNormValue(val float32) float32 { return mr.NormValue(val) } -//gosl:end minmax +//gosl:end func (mr *F32) String() string { return fmt.Sprintf("{%g %g}", mr.Min, mr.Max) @@ -139,3 +143,20 @@ func (mr *F32) FitInRange(oth F32) bool { } return adj } + +// Sanitize ensures that the Min / Max range is not infinite or contradictory. +func (mr *F32) Sanitize() { + if math32.IsInf(mr.Min, 0) { + mr.Min = 0 + } + if math32.IsInf(mr.Max, 0) { + mr.Max = 0 + } + if mr.Min > mr.Max { + mr.Min, mr.Max = mr.Max, mr.Min + } + if mr.Min == mr.Max { + mr.Min-- + mr.Max++ + } +} diff --git a/math32/minmax/minmax64.go b/math32/minmax/minmax64.go index 046c7c3958..9386a9e72c 100644 --- a/math32/minmax/minmax64.go +++ b/math32/minmax/minmax64.go @@ -5,12 +5,9 @@ // Package minmax provides a struct that holds Min and Max values. package minmax -//go:generate core generate +import "math" -const ( - MaxFloat64 float64 = 1.7976931348623158e+308 - MinFloat64 float64 = 2.2250738585072014e-308 -) +//go:generate core generate // F64 represents a min / max range for float64 values. // Supports clipping, renormalizing, etc @@ -25,39 +22,39 @@ func (mr *F64) Set(mn, mx float64) { mr.Max = mx } -// SetInfinity sets the Min to +MaxFloat, Max to -MaxFloat -- suitable for -// iteratively calling Fit*InRange +// SetInfinity sets the Min to +Inf, Max to -Inf, suitable for +// iteratively calling Fit*InRange. See also Sanitize when done. func (mr *F64) SetInfinity() { - mr.Min = MaxFloat64 - mr.Max = -MaxFloat64 + mr.Min = math.Inf(1) + mr.Max = math.Inf(-1) } -// IsValid returns true if Min <= Max +// IsValid returns true if Min <= Max. func (mr *F64) IsValid() bool { return mr.Min <= mr.Max } -// InRange tests whether value is within the range (>= Min and <= Max) +// InRange tests whether value is within the range (>= Min and <= Max). func (mr *F64) InRange(val float64) bool { return ((val >= mr.Min) && (val <= mr.Max)) } -// IsLow tests whether value is lower than the minimum +// IsLow tests whether value is lower than the minimum. func (mr *F64) IsLow(val float64) bool { return (val < mr.Min) } -// IsHigh tests whether value is higher than the maximum +// IsHigh tests whether value is higher than the maximum. func (mr *F64) IsHigh(val float64) bool { return (val > mr.Min) } -// Range returns Max - Min +// Range returns Max - Min. 
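The new SetInfinity/Sanitize pairing above supports the usual fit loop: start from an inverted infinite range, widen it over the data, then repair degenerate cases. A simplified standalone sketch of that pattern (FitValInRange is reduced to its core here; names are local, not the minmax API):

```go
package main

import (
	"fmt"
	"math"
)

// rng is a minimal stand-in for a min/max range, showing fit-then-sanitize.
type rng struct{ Min, Max float64 }

func (r *rng) setInfinity() { r.Min, r.Max = math.Inf(1), math.Inf(-1) }

func (r *rng) fitVal(v float64) {
	if v < r.Min {
		r.Min = v
	}
	if v > r.Max {
		r.Max = v
	}
}

func (r *rng) sanitize() {
	if math.IsInf(r.Min, 0) {
		r.Min = 0
	}
	if math.IsInf(r.Max, 0) {
		r.Max = 0
	}
	if r.Min > r.Max {
		r.Min, r.Max = r.Max, r.Min
	}
	if r.Min == r.Max {
		r.Min--
		r.Max++
	}
}

func main() {
	var r rng
	r.setInfinity()
	for _, v := range []float64{3, -1, 7} {
		r.fitVal(v)
	}
	r.sanitize()
	fmt.Println(r) // {-1 7}

	var empty rng
	empty.setInfinity()
	empty.sanitize()
	fmt.Println(empty) // {-1 1}: infinities zeroed, then split apart
}
```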
func (mr *F64) Range() float64 { return mr.Max - mr.Min } -// Scale returns 1 / Range -- if Range = 0 then returns 0 +// Scale returns 1 / Range -- if Range = 0 then returns 0. func (mr *F64) Scale() float64 { r := mr.Range() if r != 0 { @@ -89,7 +86,7 @@ func (mr *F64) FitValInRange(val float64) bool { // NormVal normalizes value to 0-1 unit range relative to current Min / Max range // Clips the value within Min-Max range first. func (mr *F64) NormValue(val float64) float64 { - return (mr.ClipValue(val) - mr.Min) * mr.Scale() + return (mr.ClampValue(val) - mr.Min) * mr.Scale() } // ProjVal projects a 0-1 normalized unit value into current Min / Max range (inverse of NormVal) @@ -97,9 +94,9 @@ func (mr *F64) ProjValue(val float64) float64 { return mr.Min + (val * mr.Range()) } -// ClipVal clips given value within Min / Max range +// ClampValue clips given value within Min / Max range // Note: a NaN will remain as a NaN -func (mr *F64) ClipValue(val float64) float64 { +func (mr *F64) ClampValue(val float64) float64 { if val < mr.Min { return mr.Min } @@ -135,3 +132,20 @@ func (mr *F64) FitInRange(oth F64) bool { } return adj } + +// Sanitize ensures that the Min / Max range is not infinite or contradictory. +func (mr *F64) Sanitize() { + if math.IsInf(mr.Min, 0) { + mr.Min = 0 + } + if math.IsInf(mr.Max, 0) { + mr.Max = 0 + } + if mr.Min > mr.Max { + mr.Min, mr.Max = mr.Max, mr.Min + } + if mr.Min == mr.Max { + mr.Min-- + mr.Max++ + } +} diff --git a/math32/minmax/minmax_int.go b/math32/minmax/minmax_int.go index 704e5b9c79..fef6453e09 100644 --- a/math32/minmax/minmax_int.go +++ b/math32/minmax/minmax_int.go @@ -96,7 +96,7 @@ func (mr *Int) FitValInRange(val int) bool { // NormVal normalizes value to 0-1 unit range relative to current Min / Max range // Clips the value within Min-Max range first. func (mr *Int) NormValue(val int) float32 { - return float32(mr.ClipValue(val)-mr.Min) * mr.Scale() + return float32(mr.Clamp(val)-mr.Min) * mr.Scale() } // ProjVal projects a 0-1 normalized unit value into current Min / Max range (inverse of NormVal) @@ -105,7 +105,7 @@ func (mr *Int) ProjValue(val float32) float32 { } // ClipVal clips given value within Min / Max rangee -func (mr *Int) ClipValue(val int) int { +func (mr *Int) Clamp(val int) int { if val < mr.Min { return mr.Min } diff --git a/math32/minmax/range.go b/math32/minmax/range.go index 2d73b938f5..84e7c159e4 100644 --- a/math32/minmax/range.go +++ b/math32/minmax/range.go @@ -6,8 +6,6 @@ package minmax // Range32 represents a range of values for plotting, where the min or max can optionally be fixed type Range32 struct { - - // Min and Max range values F32 // fix the minimum end of the range @@ -18,15 +16,17 @@ type Range32 struct { } // SetMin sets a fixed min value -func (rr *Range32) SetMin(mn float32) { +func (rr *Range32) SetMin(mn float32) *Range32 { rr.FixMin = true rr.Min = mn + return rr } // SetMax sets a fixed max value -func (rr *Range32) SetMax(mx float32) { +func (rr *Range32) SetMax(mx float32) *Range32 { rr.FixMax = true rr.Max = mx + return rr } // Range returns Max - Min @@ -34,13 +34,23 @@ func (rr *Range32) Range() float32 { return rr.Max - rr.Min } +// Clamp returns min, max values clamped according to Fixed min / max of range. 
+func (rr *Range32) Clamp(mnIn, mxIn float32) (mn, mx float32) { + mn, mx = mnIn, mxIn + if rr.FixMin && rr.Min < mn { + mn = rr.Min + } + if rr.FixMax && rr.Max > mx { + mx = rr.Max + } + return +} + /////////////////////////////////////////////////////////////////////// // 64 // Range64 represents a range of values for plotting, where the min or max can optionally be fixed type Range64 struct { - - // Min and Max range values F64 // fix the minimum end of the range @@ -51,18 +61,32 @@ type Range64 struct { } // SetMin sets a fixed min value -func (rr *Range64) SetMin(mn float64) { +func (rr *Range64) SetMin(mn float64) *Range64 { rr.FixMin = true rr.Min = mn + return rr } // SetMax sets a fixed max value -func (rr *Range64) SetMax(mx float64) { +func (rr *Range64) SetMax(mx float64) *Range64 { rr.FixMax = true rr.Max = mx + return rr } // Range returns Max - Min func (rr *Range64) Range() float64 { return rr.Max - rr.Min } + +// Clamp returns min, max values clamped according to Fixed min / max of range. +func (rr *Range64) Clamp(mnIn, mxIn float64) (mn, mx float64) { + mn, mx = mnIn, mxIn + if rr.FixMin && rr.Min < mn { + mn = rr.Min + } + if rr.FixMax && rr.Max > mx { + mx = rr.Max + } + return +} diff --git a/plot/README.md b/plot/README.md deleted file mode 100644 index c5fe23479b..0000000000 --- a/plot/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# Plot - -The `plot` package generates 2D plots of data using the Cogent Core `paint` rendering system. The `plotcore` sub-package has Cogent Core Widgets that can be used in applications. -* `Plot` is just a wrapper around a `plot.Plot`, for manually-configured plots. -* `PlotEditor` is an interactive plot viewer that supports selection of which data to plot, and configuration of many plot parameters. - -The code is adapted from the [gonum plot](https://github.com/gonum/plot) package (which in turn was adapted from google's [plotinum](https://code.google.com/archive/p/plotinum/), to use the Cogent Core [styles](../styles) and [paint](../paint) rendering framework, which also supports SVG output of the rendering. - -Here is the copyright notice for that package: -```go -// Copyright Ā©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -``` - diff --git a/plot/axis.go b/plot/axis.go deleted file mode 100644 index 91c1d81daa..0000000000 --- a/plot/axis.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted initially from gonum/plot: -// Copyright Ā©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plot - -import ( - "cogentcore.org/core/math32" - "cogentcore.org/core/styles" - "cogentcore.org/core/styles/units" -) - -// Normalizer rescales values from the data coordinate system to the -// normalized coordinate system. -type Normalizer interface { - // Normalize transforms a value x in the data coordinate system to - // the normalized coordinate system. - Normalize(min, max, x float32) float32 -} - -// Axis represents either a horizontal or vertical -// axis of a plot. -type Axis struct { - // Min and Max are the minimum and maximum data - // values represented by the axis. 
- Min, Max float32 - - // specifies which axis this is: X or Y - Axis math32.Dims - - // Label for the axis - Label Text - - // Line styling properties for the axis line. - Line LineStyle - - // Padding between the axis line and the data. Having - // non-zero padding ensures that the data is never drawn - // on the axis, thus making it easier to see. - Padding units.Value - - // has the text style for rendering tick labels, and is shared for actual rendering - TickText Text - - // line style for drawing tick lines - TickLine LineStyle - - // length of tick lines - TickLength units.Value - - // Ticker generates the tick marks. Any tick marks - // returned by the Marker function that are not in - // range of the axis are not drawn. - Ticker Ticker - - // Scale transforms a value given in the data coordinate system - // to the normalized coordinate system of the axisā€”its distance - // along the axis as a fraction of the axis range. - Scale Normalizer - - // AutoRescale enables an axis to automatically adapt its minimum - // and maximum boundaries, according to its underlying Ticker. - AutoRescale bool - - // cached list of ticks, set in size - ticks []Tick -} - -// Sets Defaults, range is (āˆž, Ā­āˆž), and thus any finite -// value is less than Min and greater than Max. -func (ax *Axis) Defaults(dim math32.Dims) { - ax.Min = math32.Inf(+1) - ax.Max = math32.Inf(-1) - ax.Axis = dim - ax.Line.Defaults() - ax.Label.Defaults() - ax.Label.Style.Size.Dp(20) - ax.Padding.Pt(5) - ax.TickText.Defaults() - ax.TickText.Style.Size.Dp(16) - ax.TickText.Style.Padding.Dp(2) - ax.TickLine.Defaults() - ax.TickLength.Pt(8) - if dim == math32.Y { - ax.Label.Style.Rotation = -90 - ax.TickText.Style.Align = styles.End - } - ax.Scale = LinearScale{} - ax.Ticker = DefaultTicks{} -} - -// SanitizeRange ensures that the range of the axis makes sense. -func (ax *Axis) SanitizeRange() { - if math32.IsInf(ax.Min, 0) { - ax.Min = 0 - } - if math32.IsInf(ax.Max, 0) { - ax.Max = 0 - } - if ax.Min > ax.Max { - ax.Min, ax.Max = ax.Max, ax.Min - } - if ax.Min == ax.Max { - ax.Min-- - ax.Max++ - } - - if ax.AutoRescale { - marks := ax.Ticker.Ticks(ax.Min, ax.Max) - for _, t := range marks { - ax.Min = math32.Min(ax.Min, t.Value) - ax.Max = math32.Max(ax.Max, t.Value) - } - } -} - -// LinearScale an be used as the value of an Axis.Scale function to -// set the axis to a standard linear scale. -type LinearScale struct{} - -var _ Normalizer = LinearScale{} - -// Normalize returns the fractional distance of x between min and max. -func (LinearScale) Normalize(min, max, x float32) float32 { - return (x - min) / (max - min) -} - -// LogScale can be used as the value of an Axis.Scale function to -// set the axis to a log scale. -type LogScale struct{} - -var _ Normalizer = LogScale{} - -// Normalize returns the fractional logarithmic distance of -// x between min and max. -func (LogScale) Normalize(min, max, x float32) float32 { - if min <= 0 || max <= 0 || x <= 0 { - panic("Values must be greater than 0 for a log scale.") - } - logMin := math32.Log(min) - return (math32.Log(x) - logMin) / (math32.Log(max) - logMin) -} - -// InvertedScale can be used as the value of an Axis.Scale function to -// invert the axis using any Normalizer. -type InvertedScale struct{ Normalizer } - -var _ Normalizer = InvertedScale{} - -// Normalize returns a normalized [0, 1] value for the position of x. 
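LinearScale and LogScale above both map a data value onto the 0..1 fraction of the axis it occupies. A quick standalone illustration of the two normalizations (plain float64 helpers, not the plot package types):

```go
package main

import (
	"fmt"
	"math"
)

// linearNorm is the LinearScale mapping: fractional distance of x in [min, max].
func linearNorm(min, max, x float64) float64 {
	return (x - min) / (max - min)
}

// logNorm is the LogScale mapping: fractional distance in log space,
// valid only when min, max, and x are all positive.
func logNorm(min, max, x float64) float64 {
	logMin := math.Log(min)
	return (math.Log(x) - logMin) / (math.Log(max) - logMin)
}

func main() {
	fmt.Println(linearNorm(0, 10, 2.5)) // 0.25
	fmt.Println(logNorm(1, 100, 10))    // 0.5: 10 is halfway across a 1..100 log axis
}
```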
-func (is InvertedScale) Normalize(min, max, x float32) float32 { - return is.Normalizer.Normalize(max, min, x) -} - -// Norm returns the value of x, given in the data coordinate -// system, normalized to its distance as a fraction of the -// range of this axis. For example, if x is a.Min then the return -// value is 0, and if x is a.Max then the return value is 1. -func (ax *Axis) Norm(x float32) float32 { - return ax.Scale.Normalize(ax.Min, ax.Max, x) -} diff --git a/plot/data.go b/plot/data.go deleted file mode 100644 index adf8b662ff..0000000000 --- a/plot/data.go +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted from github.com/gonum/plot: -// Copyright Ā©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plot - -import ( - "errors" - - "cogentcore.org/core/math32" -) - -// data defines the main data interfaces for plotting. -// Other more specific types of plots define their own interfaces. -// unlike gonum/plot, NaN values are treated as missing data here. - -var ( - ErrInfinity = errors.New("plotter: infinite data point") - ErrNoData = errors.New("plotter: no data points") -) - -// CheckFloats returns an error if any of the arguments are Infinity. -// or if there are no non-NaN data points available for plotting. -func CheckFloats(fs ...float32) error { - n := 0 - for _, f := range fs { - switch { - case math32.IsNaN(f): - case math32.IsInf(f, 0): - return ErrInfinity - default: - n++ - } - } - if n == 0 { - return ErrNoData - } - return nil -} - -// CheckNaNs returns true if any of the floats are NaN -func CheckNaNs(fs ...float32) bool { - for _, f := range fs { - if math32.IsNaN(f) { - return true - } - } - return false -} - -////////////////////////////////////////////////// -// Valuer - -// Valuer provides an interface for a list of scalar values -type Valuer interface { - // Len returns the number of values. - Len() int - - // Value returns a value. - Value(i int) float32 -} - -// Range returns the minimum and maximum values. -func Range(vs Valuer) (min, max float32) { - min = math32.Inf(1) - max = math32.Inf(-1) - for i := 0; i < vs.Len(); i++ { - v := vs.Value(i) - if math32.IsNaN(v) { - continue - } - min = math32.Min(min, v) - max = math32.Max(max, v) - } - return -} - -// Values implements the Valuer interface. -type Values []float32 - -func (vs Values) Len() int { - return len(vs) -} - -func (vs Values) Value(i int) float32 { - return vs[i] -} - -// CopyValues returns a Values that is a copy of the values -// from a Valuer, or an error if there are no values, or if one of -// the copied values is a Infinity. -// NaN values are skipped in the copying process. -func CopyValues(vs Valuer) (Values, error) { - if vs.Len() == 0 { - return nil, ErrNoData - } - cpy := make(Values, 0, vs.Len()) - for i := 0; i < vs.Len(); i++ { - v := vs.Value(i) - if math32.IsNaN(v) { - continue - } - if err := CheckFloats(v); err != nil { - return nil, err - } - cpy = append(cpy, v) - } - return cpy, nil -} - -////////////////////////////////////////////////// -// XYer - -// XYer provides an interface for a list of X,Y data pairs -type XYer interface { - // Len returns the number of x, y pairs. - Len() int - - // XY returns an x, y pair. 
- XY(i int) (x, y float32) -} - -// XYRange returns the minimum and maximum -// x and y values. -func XYRange(xys XYer) (xmin, xmax, ymin, ymax float32) { - xmin, xmax = Range(XValues{xys}) - ymin, ymax = Range(YValues{xys}) - return -} - -// XYs implements the XYer interface. -type XYs []math32.Vector2 - -func (xys XYs) Len() int { - return len(xys) -} - -func (xys XYs) XY(i int) (float32, float32) { - return xys[i].X, xys[i].Y -} - -// CopyXYs returns an XYs that is a copy of the x and y values from -// an XYer, or an error if one of the data points contains a NaN or -// Infinity. -func CopyXYs(data XYer) (XYs, error) { - if data.Len() == 0 { - return nil, ErrNoData - } - cpy := make(XYs, 0, data.Len()) - for i := range data.Len() { - x, y := data.XY(i) - if CheckNaNs(x, y) { - continue - } - if err := CheckFloats(x, y); err != nil { - return nil, err - } - cpy = append(cpy, math32.Vec2(x, y)) - } - return cpy, nil -} - -// PlotXYs returns plot coordinates for given set of XYs -func PlotXYs(plt *Plot, data XYs) XYs { - ps := make(XYs, len(data)) - for i := range data { - ps[i].X, ps[i].Y = plt.PX(data[i].X), plt.PY(data[i].Y) - } - return ps -} - -// XValues implements the Valuer interface, -// returning the x value from an XYer. -type XValues struct { - XYer -} - -func (xs XValues) Value(i int) float32 { - x, _ := xs.XY(i) - return x -} - -// YValues implements the Valuer interface, -// returning the y value from an XYer. -type YValues struct { - XYer -} - -func (ys YValues) Value(i int) float32 { - _, y := ys.XY(i) - return y -} - -////////////////////////////////////////////////// -// XYer - -// XYZer provides an interface for a list of X,Y,Z data triples. -// It also satisfies the XYer interface for the X,Y pairs. -type XYZer interface { - // Len returns the number of x, y, z triples. - Len() int - - // XYZ returns an x, y, z triple. - XYZ(i int) (float32, float32, float32) - - // XY returns an x, y pair. - XY(i int) (float32, float32) -} - -// XYZs implements the XYZer interface using a slice. -type XYZs []XYZ - -// XYZ is an x, y and z value. -type XYZ struct{ X, Y, Z float32 } - -// Len implements the Len method of the XYZer interface. -func (xyz XYZs) Len() int { - return len(xyz) -} - -// XYZ implements the XYZ method of the XYZer interface. -func (xyz XYZs) XYZ(i int) (float32, float32, float32) { - return xyz[i].X, xyz[i].Y, xyz[i].Z -} - -// XY implements the XY method of the XYer interface. -func (xyz XYZs) XY(i int) (float32, float32) { - return xyz[i].X, xyz[i].Y -} - -// CopyXYZs copies an XYZer. -func CopyXYZs(data XYZer) (XYZs, error) { - if data.Len() == 0 { - return nil, ErrNoData - } - cpy := make(XYZs, 0, data.Len()) - for i := range data.Len() { - x, y, z := data.XYZ(i) - if CheckNaNs(x, y, z) { - continue - } - if err := CheckFloats(x, y, z); err != nil { - return nil, err - } - cpy = append(cpy, XYZ{X: x, Y: y, Z: z}) - } - return cpy, nil -} - -// XYValues implements the XYer interface, returning -// the x and y values from an XYZer. -type XYValues struct{ XYZer } - -// XY implements the XY method of the XYer interface. -func (xy XYValues) XY(i int) (float32, float32) { - x, y, _ := xy.XYZ(i) - return x, y -} - -////////////////////////////////////////////////// -// Labeler - -// Labeler provides an interface for a list of string labels -type Labeler interface { - // Label returns a label. 
- Label(i int) string -} diff --git a/plot/draw.go b/plot/draw.go deleted file mode 100644 index 5d73bb95c6..0000000000 --- a/plot/draw.go +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted from gonum/plot: -// Copyright Ā©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plot - -import ( - "bufio" - "bytes" - "image" - "io" - "os" - - "cogentcore.org/core/math32" - "cogentcore.org/core/styles" -) - -// SVGString returns an SVG representation of the plot as a string -func (pt *Plot) SVGString() string { - b := &bytes.Buffer{} - pt.Paint.SVGOut = b - pt.svgDraw() - pt.Paint.SVGOut = nil - return b.String() -} - -// svgDraw draws SVGOut writer that must already be set in Paint -func (pt *Plot) svgDraw() { - pt.drawConfig() - io.WriteString(pt.Paint.SVGOut, pt.Paint.SVGStart()) - pt.Draw() - io.WriteString(pt.Paint.SVGOut, pt.Paint.SVGEnd()) -} - -// SVGToFile saves the SVG to given file -func (pt *Plot) SVGToFile(filename string) error { - fp, err := os.Create(filename) - if err != nil { - return err - } - defer fp.Close() - bw := bufio.NewWriter(fp) - pt.Paint.SVGOut = bw - pt.svgDraw() - pt.Paint.SVGOut = nil - return bw.Flush() -} - -// drawConfig configures everything for drawing -func (pt *Plot) drawConfig() { - pt.Resize(pt.Size) // ensure - pt.Legend.TextStyle.openFont(pt) - pt.Paint.ToDots() -} - -// Draw draws the plot to image. -// Plotters are drawn in the order in which they were -// added to the plot. -func (pt *Plot) Draw() { - pt.drawConfig() - pc := pt.Paint - ptw := float32(pt.Size.X) - pth := float32(pt.Size.X) - - ptb := image.Rectangle{Max: pt.Size} - pc.PushBounds(ptb) - - if pt.Background != nil { - pc.BlitBox(math32.Vector2{}, math32.FromPoint(pt.Size), pt.Background) - } - - if pt.Title.Text != "" { - pt.Title.Config(pt) - pos := pt.Title.PosX(ptw) - pad := pt.Title.Style.Padding.Dots - pos.Y = pad - pt.Title.Draw(pt, pos) - th := pt.Title.PaintText.BBox.Size().Y + 2*pad - pth -= th - ptb.Min.Y += int(math32.Ceil(th)) - } - - pt.X.SanitizeRange() - pt.Y.SanitizeRange() - - ywidth, tickWidth, tpad, bpad := pt.Y.sizeY(pt) - xheight, lpad, rpad := pt.X.sizeX(pt, float32(pt.Size.X-int(ywidth))) - - tb := ptb - tb.Min.X += ywidth - pc.PushBounds(tb) - pt.X.drawX(pt, lpad, rpad) - pc.PopBounds() - - tb = ptb - tb.Max.Y -= xheight - pc.PushBounds(tb) - pt.Y.drawY(pt, tickWidth, tpad, bpad) - pc.PopBounds() - - tb = ptb - tb.Min.X += ywidth + lpad - tb.Max.X -= rpad - tb.Max.Y -= xheight + bpad - tb.Min.Y += tpad - pt.PlotBox.SetFromRect(tb) - - // don't cut off lines - tb.Min.X -= 2 - tb.Min.Y -= 2 - tb.Max.X += 2 - tb.Max.Y += 2 - pc.PushBounds(tb) - - for _, plt := range pt.Plotters { - plt.Plot(pt) - } - - pt.Legend.draw(pt) - pc.PopBounds() - pc.PopBounds() // global -} - -//////////////////////////////////////////////////////////////// -// Axis - -// drawTicks returns true if the tick marks should be drawn. 
-func (ax *Axis) drawTicks() bool { - return ax.TickLine.Width.Value > 0 && ax.TickLength.Value > 0 -} - -// sizeX returns the total height of the axis, left and right padding -func (ax *Axis) sizeX(pt *Plot, axw float32) (ht, lpad, rpad int) { - pc := pt.Paint - uc := &pc.UnitContext - ax.TickLength.ToDots(uc) - ax.ticks = ax.Ticker.Ticks(ax.Min, ax.Max) - h := float32(0) - if ax.Label.Text != "" { // We assume that the label isn't rotated. - ax.Label.Config(pt) - h += ax.Label.PaintText.BBox.Size().Y - h += ax.Label.Style.Padding.Dots - } - lw := ax.Line.Width.Dots - lpad = int(math32.Ceil(lw)) + 2 - rpad = int(math32.Ceil(lw)) + 10 - tht := float32(0) - if len(ax.ticks) > 0 { - if ax.drawTicks() { - h += ax.TickLength.Dots - } - ftk := ax.firstTickLabel() - if ftk.Label != "" { - px, _ := ax.tickPosX(pt, ftk, axw) - if px < 0 { - lpad += int(math32.Ceil(-px)) - } - tht = max(tht, ax.TickText.PaintText.BBox.Size().Y) - } - ltk := ax.lastTickLabel() - if ltk.Label != "" { - px, wd := ax.tickPosX(pt, ltk, axw) - if px+wd > axw { - rpad += int(math32.Ceil((px + wd) - axw)) - } - tht = max(tht, ax.TickText.PaintText.BBox.Size().Y) - } - ax.TickText.Text = ax.longestTickLabel() - if ax.TickText.Text != "" { - ax.TickText.Config(pt) - tht = max(tht, ax.TickText.PaintText.BBox.Size().Y) - } - h += ax.TickText.Style.Padding.Dots - } - h += tht + lw + ax.Padding.Dots - - ht = int(math32.Ceil(h)) - return -} - -// tickLabelPosX returns the relative position and width for given tick along X axis -// for given total axis width -func (ax *Axis) tickPosX(pt *Plot, t Tick, axw float32) (px, wd float32) { - x := axw * float32(ax.Norm(t.Value)) - if x < 0 || x > axw { - return - } - ax.TickText.Text = t.Label - ax.TickText.Config(pt) - pos := ax.TickText.PosX(0) - px = pos.X + x - wd = ax.TickText.PaintText.BBox.Size().X - return -} - -func (ax *Axis) firstTickLabel() Tick { - for _, tk := range ax.ticks { - if tk.Label != "" { - return tk - } - } - return Tick{} -} - -func (ax *Axis) lastTickLabel() Tick { - n := len(ax.ticks) - for i := n - 1; i >= 0; i-- { - tk := ax.ticks[i] - if tk.Label != "" { - return tk - } - } - return Tick{} -} - -func (ax *Axis) longestTickLabel() string { - lst := "" - for _, tk := range ax.ticks { - if len(tk.Label) > len(lst) { - lst = tk.Label - } - } - return lst -} - -func (ax *Axis) sizeY(pt *Plot) (ywidth, tickWidth, tpad, bpad int) { - pc := pt.Paint - uc := &pc.UnitContext - ax.ticks = ax.Ticker.Ticks(ax.Min, ax.Max) - ax.TickLength.ToDots(uc) - - w := float32(0) - if ax.Label.Text != "" { - ax.Label.Config(pt) - w += ax.Label.PaintText.BBox.Size().X - w += ax.Label.Style.Padding.Dots - } - - lw := ax.Line.Width.Dots - tpad = int(math32.Ceil(lw)) + 2 - bpad = int(math32.Ceil(lw)) + 2 - - if len(ax.ticks) > 0 { - if ax.drawTicks() { - w += ax.TickLength.Dots - } - ax.TickText.Text = ax.longestTickLabel() - if ax.TickText.Text != "" { - ax.TickText.Config(pt) - tw := ax.TickText.PaintText.BBox.Size().X - w += tw - tickWidth = int(math32.Ceil(tw)) - w += ax.TickText.Style.Padding.Dots - tht := int(math32.Ceil(0.5 * ax.TickText.PaintText.BBox.Size().X)) - tpad += tht - bpad += tht - } - } - w += lw + ax.Padding.Dots - ywidth = int(math32.Ceil(w)) - return -} - -// drawX draws the horizontal axis -func (ax *Axis) drawX(pt *Plot, lpad, rpad int) { - ab := pt.Paint.Bounds - ab.Min.X += lpad - ab.Max.X -= rpad - axw := float32(ab.Size().X) - // axh := float32(ab.Size().Y) // height of entire plot - if ax.Label.Text != "" { - ax.Label.Config(pt) - pos := ax.Label.PosX(axw) - 
pos.X += float32(ab.Min.X) - th := ax.Label.PaintText.BBox.Size().Y - pos.Y = float32(ab.Max.Y) - th - ax.Label.Draw(pt, pos) - ab.Max.Y -= int(math32.Ceil(th + ax.Label.Style.Padding.Dots)) - } - - tickHt := float32(0) - for _, t := range ax.ticks { - x := axw * float32(ax.Norm(t.Value)) - if x < 0 || x > axw || t.IsMinor() { - continue - } - ax.TickText.Text = t.Label - ax.TickText.Config(pt) - pos := ax.TickText.PosX(0) - pos.X += x + float32(ab.Min.X) - tickHt = ax.TickText.PaintText.BBox.Size().Y + ax.TickText.Style.Padding.Dots - pos.Y += float32(ab.Max.Y) - tickHt - ax.TickText.Draw(pt, pos) - } - - if len(ax.ticks) > 0 { - ab.Max.Y -= int(math32.Ceil(tickHt)) - // } else { - // y += ax.Width / 2 - } - - if len(ax.ticks) > 0 && ax.drawTicks() { - ln := ax.TickLength.Dots - for _, t := range ax.ticks { - yoff := float32(0) - if t.IsMinor() { - yoff = 0.5 * ln - } - x := axw * float32(ax.Norm(t.Value)) - if x < 0 || x > axw { - continue - } - x += float32(ab.Min.X) - ax.TickLine.Draw(pt, math32.Vec2(x, float32(ab.Max.Y)-yoff), math32.Vec2(x, float32(ab.Max.Y)-ln)) - } - ab.Max.Y -= int(ln - 0.5*ax.Line.Width.Dots) - } - - ax.Line.Draw(pt, math32.Vec2(float32(ab.Min.X), float32(ab.Max.Y)), math32.Vec2(float32(ab.Min.X)+axw, float32(ab.Max.Y))) -} - -// drawY draws the Y axis along the left side -func (ax *Axis) drawY(pt *Plot, tickWidth, tpad, bpad int) { - ab := pt.Paint.Bounds - ab.Min.Y += tpad - ab.Max.Y -= bpad - axh := float32(ab.Size().Y) - if ax.Label.Text != "" { - ax.Label.Style.Align = styles.Center - pos := ax.Label.PosY(axh) - tw := ax.Label.PaintText.BBox.Size().X - pos.Y += float32(ab.Min.Y) + ax.Label.PaintText.BBox.Size().Y - pos.X = float32(ab.Min.X) - ax.Label.Draw(pt, pos) - ab.Min.X += int(math32.Ceil(tw + ax.Label.Style.Padding.Dots)) - } - - tickWd := float32(0) - for _, t := range ax.ticks { - y := axh * (1 - float32(ax.Norm(t.Value))) - if y < 0 || y > axh || t.IsMinor() { - continue - } - ax.TickText.Text = t.Label - ax.TickText.Config(pt) - pos := ax.TickText.PosX(float32(tickWidth)) - pos.X += float32(ab.Min.X) - pos.Y = float32(ab.Min.Y) + y - 0.5*ax.TickText.PaintText.BBox.Size().Y - tickWd = max(tickWd, ax.TickText.PaintText.BBox.Size().X+ax.TickText.Style.Padding.Dots) - ax.TickText.Draw(pt, pos) - } - - if len(ax.ticks) > 0 { - ab.Min.X += int(math32.Ceil(tickWd)) - // } else { - // y += ax.Width / 2 - } - - if len(ax.ticks) > 0 && ax.drawTicks() { - ln := ax.TickLength.Dots - for _, t := range ax.ticks { - xoff := float32(0) - if t.IsMinor() { - xoff = 0.5 * ln - } - y := axh * (1 - float32(ax.Norm(t.Value))) - if y < 0 || y > axh { - continue - } - y += float32(ab.Min.Y) - ax.TickLine.Draw(pt, math32.Vec2(float32(ab.Min.X)+xoff, y), math32.Vec2(float32(ab.Min.X)+ln, y)) - } - ab.Min.X += int(ln + 0.5*ax.Line.Width.Dots) - } - - ax.Line.Draw(pt, math32.Vec2(float32(ab.Min.X), float32(ab.Min.Y)), math32.Vec2(float32(ab.Min.X), float32(ab.Max.Y))) -} - -//////////////////////////////////////////////// -// Legend - -// draw draws the legend -func (lg *Legend) draw(pt *Plot) { - pc := pt.Paint - uc := &pc.UnitContext - ptb := pc.Bounds - - lg.ThumbnailWidth.ToDots(uc) - lg.TextStyle.ToDots(uc) - lg.Position.XOffs.ToDots(uc) - lg.Position.YOffs.ToDots(uc) - lg.TextStyle.openFont(pt) - - em := lg.TextStyle.Font.Face.Metrics.Em - pad := math32.Ceil(lg.TextStyle.Padding.Dots) - - var ltxt Text - ltxt.Style = lg.TextStyle - var sz image.Point - maxTht := 0 - for _, e := range lg.Entries { - ltxt.Text = e.Text - ltxt.Config(pt) - sz.X = max(sz.X, 
int(math32.Ceil(ltxt.PaintText.BBox.Size().X))) - tht := int(math32.Ceil(ltxt.PaintText.BBox.Size().Y + pad)) - maxTht = max(tht, maxTht) - } - sz.X += int(em) - sz.Y = len(lg.Entries) * maxTht - txsz := sz - sz.X += int(lg.ThumbnailWidth.Dots) - - pos := ptb.Min - if lg.Position.Left { - pos.X += int(lg.Position.XOffs.Dots) - } else { - pos.X = ptb.Max.X - sz.X - int(lg.Position.XOffs.Dots) - } - if lg.Position.Top { - pos.Y += int(lg.Position.YOffs.Dots) - } else { - pos.Y = ptb.Max.Y - sz.Y - int(lg.Position.YOffs.Dots) - } - - if lg.Fill != nil { - pc.FillBox(math32.FromPoint(pos), math32.FromPoint(sz), lg.Fill) - } - cp := pos - thsz := image.Point{X: int(lg.ThumbnailWidth.Dots), Y: maxTht - 2*int(pad)} - for _, e := range lg.Entries { - tp := cp - tp.X += int(txsz.X) - tp.Y += int(pad) - tb := image.Rectangle{Min: tp, Max: tp.Add(thsz)} - pc.PushBounds(tb) - for _, t := range e.Thumbs { - t.Thumbnail(pt) - } - pc.PopBounds() - ltxt.Text = e.Text - ltxt.Config(pt) - ltxt.Draw(pt, math32.FromPoint(cp)) - cp.Y += maxTht - } -} diff --git a/plot/labelling.go b/plot/labelling.go deleted file mode 100644 index 29b2b1cc64..0000000000 --- a/plot/labelling.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copied directly from gonum/plot: -// Copyright Ā©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This is an implementation of the Talbot, Lin and Hanrahan algorithm -// described in doi:10.1109/TVCG.2010.130 with reference to the R -// implementation in the labeling package, Ā©2014 Justin Talbot (Licensed -// MIT+file LICENSE|Unlimited). - -package plot - -import "cogentcore.org/core/math32" - -const ( - // dlamchE is the machine epsilon. For IEEE this is 2^{-53}. - dlamchE = 1.0 / (1 << 53) - - // dlamchB is the radix of the machine (the base of the number system). - dlamchB = 2 - - // dlamchP is base * eps. - dlamchP = dlamchB * dlamchE -) - -const ( - // free indicates no restriction on label containment. - free = iota - // containData specifies that all the data range lies - // within the interval [label_min, label_max]. - containData - // withinData specifies that all labels lie within the - // interval [dMin, dMax]. - withinData -) - -// talbotLinHanrahan returns an optimal set of approximately want label values -// for the data range [dMin, dMax], and the step and magnitude of the step between values. -// containment is specifies are guarantees for label and data range containment, valid -// values are free, containData and withinData. -// The optional parameters Q, nice numbers, and w, weights, allow tuning of the -// algorithm but by default (when nil) are set to the parameters described in the -// paper. -// The legibility function allows tuning of the legibility assessment for labels. -// By default, when nil, legbility will set the legibility score for each candidate -// labelling scheme to 1. -// See the paper for an explanation of the function of Q, w and legibility. 
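// Editor's note: a hedged sketch of a typical internal call, not taken from
// the original source; the range [0, 100] and want=5 are arbitrary examples.
// With nil Q, weights, and legibility, the defaults described above are used:
//
//    values, step, q, mag := talbotLinHanrahan(0, 100, 5, free, nil, nil, nil)
//
// where values holds the chosen label positions and step, q, and mag describe
// the spacing between them, as documented above.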
-func talbotLinHanrahan(dMin, dMax float32, want int, containment int, Q []float32, w *weights, legibility func(lMin, lMax, lStep float32) float32) (values []float32, step, q float32, magnitude int) { - const eps = dlamchP * 100 - - if dMin > dMax { - panic("labelling: invalid data range: min greater than max") - } - - if Q == nil { - Q = []float32{1, 5, 2, 2.5, 4, 3} - } - if w == nil { - w = &weights{ - simplicity: 0.25, - coverage: 0.2, - density: 0.5, - legibility: 0.05, - } - } - if legibility == nil { - legibility = unitLegibility - } - - if r := dMax - dMin; r < eps { - l := make([]float32, want) - step := r / float32(want-1) - for i := range l { - l[i] = dMin + float32(i)*step - } - magnitude = minAbsMag(dMin, dMax) - return l, step, 0, magnitude - } - - type selection struct { - // n is the number of labels selected. - n int - // lMin and lMax are the selected min - // and max label values. lq is the q - // chosen. - lMin, lMax, lStep, lq float32 - // score is the score for the selection. - score float32 - // magnitude is the magnitude of the - // label step distance. - magnitude int - } - best := selection{score: -2} - -outer: - for skip := 1; ; skip++ { - for _, q := range Q { - sm := maxSimplicity(q, Q, skip) - if w.score(sm, 1, 1, 1) < best.score { - break outer - } - - for have := 2; ; have++ { - dm := maxDensity(have, want) - if w.score(sm, 1, dm, 1) < best.score { - break - } - - delta := (dMax - dMin) / float32(have+1) / float32(skip) / q - - const maxExp = 309 - for mag := int(math32.Ceil(math32.Log10(delta))); mag < maxExp; mag++ { - step := float32(skip) * q * math32.Pow10(mag) - - cm := maxCoverage(dMin, dMax, step*float32(have-1)) - if w.score(sm, cm, dm, 1) < best.score { - break - } - - fracStep := step / float32(skip) - kStep := step * float32(have-1) - - minStart := (math32.Floor(dMax/step) - float32(have-1)) * float32(skip) - maxStart := math32.Ceil(dMax/step) * float32(skip) - for start := minStart; start <= maxStart && start != start-1; start++ { - lMin := start * fracStep - lMax := lMin + kStep - - switch containment { - case containData: - if dMin < lMin || lMax < dMax { - continue - } - case withinData: - if lMin < dMin || dMax < lMax { - continue - } - case free: - // Free choice. - } - - score := w.score( - simplicity(q, Q, skip, lMin, lMax, step), - coverage(dMin, dMax, lMin, lMax), - density(have, want, dMin, dMax, lMin, lMax), - legibility(lMin, lMax, step), - ) - if score > best.score { - best = selection{ - n: have, - lMin: lMin, - lMax: lMax, - lStep: float32(skip) * q, - lq: q, - score: score, - magnitude: mag, - } - } - } - } - } - } - } - - if best.score == -2 { - l := make([]float32, want) - step := (dMax - dMin) / float32(want-1) - for i := range l { - l[i] = dMin + float32(i)*step - } - magnitude = minAbsMag(dMin, dMax) - return l, step, 0, magnitude - } - - l := make([]float32, best.n) - step = best.lStep * math32.Pow10(best.magnitude) - for i := range l { - l[i] = best.lMin + float32(i)*step - } - return l, best.lStep, best.lq, best.magnitude -} - -// minAbsMag returns the minumum magnitude of the absolute values of a and b. -func minAbsMag(a, b float32) int { - return int(math32.Min(math32.Floor(math32.Log10(math32.Abs(a))), (math32.Floor(math32.Log10(math32.Abs(b)))))) -} - -// simplicity returns the simplicity score for how will the curent q, lMin, lMax, -// lStep and skip match the given nice numbers, Q. 
-func simplicity(q float32, Q []float32, skip int, lMin, lMax, lStep float32) float32 { - const eps = dlamchP * 100 - - for i, v := range Q { - if v == q { - m := math32.Mod(lMin, lStep) - v = 0 - if (m < eps || lStep-m < eps) && lMin <= 0 && 0 <= lMax { - v = 1 - } - return 1 - float32(i)/(float32(len(Q))-1) - float32(skip) + v - } - } - panic("labelling: invalid q for Q") -} - -// maxSimplicity returns the maximum simplicity for q, Q and skip. -func maxSimplicity(q float32, Q []float32, skip int) float32 { - for i, v := range Q { - if v == q { - return 1 - float32(i)/(float32(len(Q))-1) - float32(skip) + 1 - } - } - panic("labelling: invalid q for Q") -} - -// coverage returns the coverage score for based on the average -// squared distance between the extreme labels, lMin and lMax, and -// the extreme data points, dMin and dMax. -func coverage(dMin, dMax, lMin, lMax float32) float32 { - r := 0.1 * (dMax - dMin) - max := dMax - lMax - min := dMin - lMin - return 1 - 0.5*(max*max+min*min)/(r*r) -} - -// maxCoverage returns the maximum coverage achievable for the data -// range. -func maxCoverage(dMin, dMax, span float32) float32 { - r := dMax - dMin - if span <= r { - return 1 - } - h := 0.5 * (span - r) - r *= 0.1 - return 1 - (h*h)/(r*r) -} - -// density returns the density score which measures the goodness of -// the labelling density compared to the user defined target -// based on the want parameter given to talbotLinHanrahan. -func density(have, want int, dMin, dMax, lMin, lMax float32) float32 { - rho := float32(have-1) / (lMax - lMin) - rhot := float32(want-1) / (math32.Max(lMax, dMax) - math32.Min(dMin, lMin)) - if d := rho / rhot; d >= 1 { - return 2 - d - } - return 2 - rhot/rho -} - -// maxDensity returns the maximum density score achievable for have and want. -func maxDensity(have, want int) float32 { - if have < want { - return 1 - } - return 2 - float32(have-1)/float32(want-1) -} - -// unitLegibility returns a default legibility score ignoring label -// spacing. -func unitLegibility(_, _, _ float32) float32 { - return 1 -} - -// weights is a helper type to calcuate the labelling scheme's total score. -type weights struct { - simplicity, coverage, density, legibility float32 -} - -// score returns the score for a labelling scheme with simplicity, s, -// coverage, c, density, d and legibility l. -func (w *weights) score(s, c, d, l float32) float32 { - return w.simplicity*s + w.coverage*c + w.density*d + w.legibility*l -} diff --git a/plot/legend.go b/plot/legend.go deleted file mode 100644 index b7b4a2596a..0000000000 --- a/plot/legend.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plot - -import ( - "image" - - "cogentcore.org/core/colors" - "cogentcore.org/core/colors/gradient" - "cogentcore.org/core/styles/units" -) - -// LegendPosition specifies where to put the legend -type LegendPosition struct { - // Top and Left specify the location of the legend. - Top, Left bool - - // XOffs and YOffs are added to the legend's final position, - // relative to the relevant anchor position - XOffs, YOffs units.Value -} - -func (lg *LegendPosition) Defaults() { - lg.Top = true -} - -// A Legend gives a description of the meaning of different -// data elements of the plot. Each legend entry has a name -// and a thumbnail, where the thumbnail shows a small -// sample of the display style of the corresponding data. 
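// Editor's note: a brief, hedged usage sketch (not part of the original file).
// Plotters typically register themselves so they appear in the legend, e.g.
//
//    plt.Legend.Add("series A", line) // "series A" and line are hypothetical; line must implement Thumbnailer
//
// and LegendForPlotter recovers the entry text later, e.g. for tooltips.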
-type Legend struct { - // TextStyle is the style given to the legend entry texts. - TextStyle TextStyle - - // position of the legend - Position LegendPosition `display:"inline"` - - // ThumbnailWidth is the width of legend thumbnails. - ThumbnailWidth units.Value - - // Fill specifies the background fill color for the legend box, - // if non-nil. - Fill image.Image - - // Entries are all of the LegendEntries described by this legend. - Entries []LegendEntry -} - -func (lg *Legend) Defaults() { - lg.TextStyle.Defaults() - lg.TextStyle.Padding.Dp(2) - lg.TextStyle.Font.Size.Dp(20) - lg.Position.Defaults() - lg.ThumbnailWidth.Pt(20) - lg.Fill = gradient.ApplyOpacity(colors.Scheme.Surface, 0.75) -} - -// Add adds an entry to the legend with the given name. -// The entry's thumbnail is drawn as the composite of all of the -// thumbnails. -func (lg *Legend) Add(name string, thumbs ...Thumbnailer) { - lg.Entries = append(lg.Entries, LegendEntry{Text: name, Thumbs: thumbs}) -} - -// LegendForPlotter returns the legend Text for given plotter, -// if it exists as a Thumbnailer in the legend entries. -// Otherwise returns empty string. -func (lg *Legend) LegendForPlotter(plt Plotter) string { - for _, e := range lg.Entries { - for _, tn := range e.Thumbs { - if tp, isp := tn.(Plotter); isp && tp == plt { - return e.Text - } - } - } - return "" -} - -// Thumbnailer wraps the Thumbnail method, which -// draws the small image in a legend representing the -// style of data. -type Thumbnailer interface { - // Thumbnail draws an thumbnail representing - // a legend entry. The thumbnail will usually show - // a smaller representation of the style used - // to plot the corresponding data. - Thumbnail(pt *Plot) -} - -// A LegendEntry represents a single line of a legend, it -// has a name and an icon. -type LegendEntry struct { - // text is the text associated with this entry. - Text string - - // thumbs is a slice of all of the thumbnails styles - Thumbs []Thumbnailer -} diff --git a/plot/line.go b/plot/line.go deleted file mode 100644 index 2e05073391..0000000000 --- a/plot/line.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plot - -import ( - "image" - - "cogentcore.org/core/colors" - "cogentcore.org/core/math32" - "cogentcore.org/core/styles/units" -) - -// LineStyle has style properties for line drawing -type LineStyle struct { - - // stroke color image specification; stroking is off if nil - Color image.Image - - // line width - Width units.Value - - // Dashes are the dashes of the stroke. Each pair of values specifies - // the amount to paint and then the amount to skip. - Dashes []float32 -} - -func (ls *LineStyle) Defaults() { - ls.Color = colors.Scheme.OnSurface - ls.Width.Pt(1) -} - -// SetStroke sets the stroke style in plot paint to current line style. -// returns false if either the Width = 0 or Color is nil -func (ls *LineStyle) SetStroke(pt *Plot) bool { - if ls.Color == nil { - return false - } - pc := pt.Paint - uc := &pc.UnitContext - ls.Width.ToDots(uc) - if ls.Width.Dots == 0 { - return false - } - pc.StrokeStyle.Width = ls.Width - pc.StrokeStyle.Color = ls.Color - pc.StrokeStyle.ToDots(uc) - return true -} - -// Draw draws a line between given coordinates, setting the stroke style -// to current parameters. 
Returns false if either Width = 0 or Color = nil -func (ls *LineStyle) Draw(pt *Plot, start, end math32.Vector2) bool { - if !ls.SetStroke(pt) { - return false - } - pc := pt.Paint - pc.MoveTo(start.X, start.Y) - pc.LineTo(end.X, end.Y) - pc.Stroke() - return true -} diff --git a/plot/plot.go b/plot/plot.go deleted file mode 100644 index cdd40511b1..0000000000 --- a/plot/plot.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted from github.com/gonum/plot: -// Copyright Ā©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plot - -//go:generate core generate -add-types - -import ( - "image" - - "cogentcore.org/core/base/iox/imagex" - "cogentcore.org/core/colors" - "cogentcore.org/core/math32" - "cogentcore.org/core/paint" - "cogentcore.org/core/styles" -) - -// Plot is the basic type representing a plot. -// It renders into its own image.RGBA Pixels image, -// and can also save a corresponding SVG version. -// The Axis ranges are updated automatically when plots -// are added, so setting a fixed range should happen -// after that point. See [UpdateRange] method as well. -type Plot struct { - // Title of the plot - Title Text - - // Background is the background of the plot. - // The default is [colors.Scheme.Surface]. - Background image.Image - - // standard text style with default options - StandardTextStyle styles.Text - - // X and Y are the horizontal and vertical axes - // of the plot respectively. - X, Y Axis - - // Legend is the plot's legend. - Legend Legend - - // plotters are drawn by calling their Plot method - // after the axes are drawn. - Plotters []Plotter - - // size is the target size of the image to render to - Size image.Point - - // DPI is the dots per inch for rendering the image. - // Larger numbers result in larger scaling of the plot contents - // which is strongly recommended for print (e.g., use 300 for print) - DPI float32 `default:"96,160,300"` - - // painter for rendering - Paint *paint.Context - - // pixels that we render into - Pixels *image.RGBA `copier:"-" json:"-" xml:"-" edit:"-"` - - // Current plot bounding box in image coordinates, for plotting coordinates - PlotBox math32.Box2 -} - -// Defaults sets defaults -func (pt *Plot) Defaults() { - pt.Title.Defaults() - pt.Title.Style.Size.Dp(24) - pt.Background = colors.Scheme.Surface - pt.X.Defaults(math32.X) - pt.Y.Defaults(math32.Y) - pt.Legend.Defaults() - pt.DPI = 96 - pt.Size = image.Point{1280, 1024} - pt.StandardTextStyle.Defaults() - pt.StandardTextStyle.WhiteSpace = styles.WhiteSpaceNowrap -} - -// New returns a new plot with some reasonable default settings. -func New() *Plot { - pt := &Plot{} - pt.Defaults() - return pt -} - -// Add adds a Plotters to the plot. -// -// If the plotters implements DataRanger then the -// minimum and maximum values of the X and Y -// axes are changed if necessary to fit the range of -// the data. -// -// When drawing the plot, Plotters are drawn in the -// order in which they were added to the plot. -func (pt *Plot) Add(ps ...Plotter) { - pt.Plotters = append(pt.Plotters, ps...) 
-}
-
-// SetPixels sets the backing pixels image to given image.RGBA
-func (pt *Plot) SetPixels(img *image.RGBA) {
- pt.Pixels = img
- pt.Paint = paint.NewContextFromImage(pt.Pixels)
- pt.Paint.UnitContext.DPI = pt.DPI
- pt.Size = pt.Pixels.Bounds().Size()
- pt.UpdateRange() // needs context, to automatically update for labels
-}
-
-// Resize sets the size of the output image to given size.
-// Does nothing if already the right size.
-func (pt *Plot) Resize(sz image.Point) {
- if pt.Pixels != nil {
- ib := pt.Pixels.Bounds().Size()
- if ib == sz {
- pt.Size = sz
- pt.Paint.UnitContext.DPI = pt.DPI
- return // already good
- }
- }
- pt.SetPixels(image.NewRGBA(image.Rectangle{Max: sz}))
-}
-
-func (pt *Plot) SaveImage(filename string) error {
- return imagex.Save(pt.Pixels, filename)
-}
-
-// NominalX configures the plot to have a nominal X
-// axis: an X axis with names instead of numbers. The
-// X locations corresponding to the names are the integers,
-// e.g., the x value 0 is centered above the first name and
-// 1 is above the second name, etc. Labels for x values
-// that do not end up in range of the X axis will not have
-// tick marks.
-func (pt *Plot) NominalX(names ...string) {
- pt.X.TickLine.Width.Pt(0)
- pt.X.TickLength.Pt(0)
- pt.X.Line.Width.Pt(0)
- // pt.Y.Padding.Pt(pt.X.Tick.Label.Width(names[0]) / 2)
- ticks := make([]Tick, len(names))
- for i, name := range names {
- ticks[i] = Tick{float32(i), name}
- }
- pt.X.Ticker = ConstantTicks(ticks)
-}
-
-// HideX configures the X axis so that it will not be drawn.
-func (pt *Plot) HideX() {
- pt.X.TickLength.Pt(0)
- pt.X.Line.Width.Pt(0)
- pt.X.Ticker = ConstantTicks([]Tick{})
-}
-
-// HideY configures the Y axis so that it will not be drawn.
-func (pt *Plot) HideY() {
- pt.Y.TickLength.Pt(0)
- pt.Y.Line.Width.Pt(0)
- pt.Y.Ticker = ConstantTicks([]Tick{})
-}
-
-// HideAxes hides the X and Y axes.
-func (pt *Plot) HideAxes() {
- pt.HideX()
- pt.HideY()
-}
-
-// NominalY is like NominalX, but for the Y axis.
-func (pt *Plot) NominalY(names ...string) {
- pt.Y.TickLine.Width.Pt(0)
- pt.Y.TickLength.Pt(0)
- pt.Y.Line.Width.Pt(0)
- // pt.X.Padding = pt.Y.Tick.Label.Height(names[0]) / 2
- ticks := make([]Tick, len(names))
- for i, name := range names {
- ticks[i] = Tick{float32(i), name}
- }
- pt.Y.Ticker = ConstantTicks(ticks)
-}
-
-// UpdateRange updates the axis range values based on current Plot values.
-// This first resets the range so any fixed additional range values should
-// be set after this point.
-func (pt *Plot) UpdateRange() {
- pt.X.Min = math32.Inf(+1)
- pt.X.Max = math32.Inf(-1)
- pt.Y.Min = math32.Inf(+1)
- pt.Y.Max = math32.Inf(-1)
- for _, d := range pt.Plotters {
- pt.UpdateRangeFromPlotter(d)
- }
-}
-
-func (pt *Plot) UpdateRangeFromPlotter(d Plotter) {
- if x, ok := d.(DataRanger); ok {
- xmin, xmax, ymin, ymax := x.DataRange(pt)
- pt.X.Min = math32.Min(pt.X.Min, xmin)
- pt.X.Max = math32.Max(pt.X.Max, xmax)
- pt.Y.Min = math32.Min(pt.Y.Min, ymin)
- pt.Y.Max = math32.Max(pt.Y.Max, ymax)
- }
-}
-
-// PX returns the X-axis plotting coordinate for given raw data value
-// using the current plot bounding region
-func (pt *Plot) PX(v float32) float32 {
- return pt.PlotBox.ProjectX(pt.X.Norm(v))
-}
-
-// PY returns the Y-axis plotting coordinate for given raw data value
-func (pt *Plot) PY(v float32) float32 {
- return pt.PlotBox.ProjectY(1 - pt.Y.Norm(v))
-}
-
-// ClosestDataToPixel returns the Plotter data point closest to given pixel point,
-// in the Pixels image.
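// Editor's note: a hedged usage sketch (not from the original source), which
// mirrors how the plotcore Plot widget builds its tooltip later in this diff;
// px, py are a pixel position supplied by the caller, and fmt is assumed to
// be imported:
//
//    plotter, idx, dist, data, _, legend := pt.ClosestDataToPixel(px, py)
//    if plotter != nil && dist <= 10 {
//        fmt.Printf("%s[%d]: (%g, %g)\n", legend, idx, data.X, data.Y)
//    }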
-func (pt *Plot) ClosestDataToPixel(px, py int) (plt Plotter, idx int, dist float32, data, pixel math32.Vector2, legend string) { - tp := math32.Vec2(float32(px), float32(py)) - dist = float32(math32.MaxFloat32) - for _, p := range pt.Plotters { - dts, pxls := p.XYData() - for i := range pxls.Len() { - ptx, pty := pxls.XY(i) - pxy := math32.Vec2(ptx, pty) - d := pxy.DistanceTo(tp) - if d < dist { - dist = d - pixel = pxy - plt = p - idx = i - dx, dy := dts.XY(i) - data = math32.Vec2(dx, dy) - legend = pt.Legend.LegendForPlotter(p) - } - } - } - return -} diff --git a/plot/plot_test.go b/plot/plot_test.go deleted file mode 100644 index 8332dee986..0000000000 --- a/plot/plot_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plot - -import ( - "image" - "os" - "testing" - - "cogentcore.org/core/base/iox/imagex" - "cogentcore.org/core/paint" -) - -func TestMain(m *testing.M) { - paint.FontLibrary.InitFontPaths(paint.FontPaths...) - os.Exit(m.Run()) -} - -func TestPlot(t *testing.T) { - pt := New() - pt.Title.Text = "Test Plot" - pt.X.Min = 0 - pt.X.Max = 100 - pt.X.Label.Text = "X Axis" - pt.Y.Min = 0 - pt.Y.Max = 100 - pt.Y.Label.Text = "Y Axis" - - pt.Resize(image.Point{640, 480}) - pt.Draw() - imagex.Assert(t, pt.Pixels, "plot.png") -} diff --git a/plot/plotcore/barplot.go b/plot/plotcore/barplot.go deleted file mode 100644 index de74cd45ff..0000000000 --- a/plot/plotcore/barplot.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plotcore - -import ( - "fmt" - "log" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/colors" - "cogentcore.org/core/math32" - "cogentcore.org/core/math32/minmax" - "cogentcore.org/core/plot" - "cogentcore.org/core/plot/plots" - "cogentcore.org/core/tensor/stats/split" - "cogentcore.org/core/tensor/table" -) - -// bar plot is on integer positions, with different Y values and / or -// legend values interleaved - -// genPlotBar generates a Bar plot, setting GPlot variable -func (pl *PlotEditor) genPlotBar() { - plt := plot.New() // note: not clear how to re-use, due to newtablexynames - if pl.Options.BarWidth > 1 { - pl.Options.BarWidth = .8 - } - - // process xaxis first - xi, xview, err := pl.plotXAxis(plt, pl.table) - if err != nil { - return - } - xp := pl.Columns[xi] - - var lsplit *table.Splits - nleg := 1 - if pl.Options.Legend != "" { - _, err = pl.table.Table.ColumnIndex(pl.Options.Legend) - if err != nil { - log.Println("plot.Legend: " + err.Error()) - } else { - xview.SortColumnNames([]string{pl.Options.Legend, xp.Column}, table.Ascending) // make it fit! 
- lsplit = split.GroupBy(xview, pl.Options.Legend) - nleg = max(lsplit.Len(), 1) - } - } - - var firstXY *tableXY - var strCols []*ColumnOptions - nys := 0 - for _, cp := range pl.Columns { - if !cp.On { - continue - } - if cp.IsString { - strCols = append(strCols, cp) - continue - } - if cp.TensorIndex < 0 { - yc := errors.Log1(pl.table.Table.ColumnByName(cp.Column)) - _, sz := yc.RowCellSize() - nys += sz - } else { - nys++ - } - } - - if nys == 0 { - return - } - - stride := nys * nleg - if stride > 1 { - stride += 1 // extra gap - } - - yoff := 0 - yidx := 0 - maxx := 0 // max number of x values - for _, cp := range pl.Columns { - if !cp.On || cp == xp { - continue - } - if cp.IsString { - continue - } - start := yoff - for li := 0; li < nleg; li++ { - lview := xview - leg := "" - if lsplit != nil && len(lsplit.Values) > li { - leg = lsplit.Values[li][0] - lview = lsplit.Splits[li] - } - nidx := 1 - stidx := cp.TensorIndex - if cp.TensorIndex < 0 { // do all - yc := errors.Log1(pl.table.Table.ColumnByName(cp.Column)) - _, sz := yc.RowCellSize() - nidx = sz - stidx = 0 - } - for ii := 0; ii < nidx; ii++ { - idx := stidx + ii - xy, _ := newTableXYName(lview, xi, xp.TensorIndex, cp.Column, idx, cp.Range) - if xy == nil { - continue - } - maxx = max(maxx, lview.Len()) - if firstXY == nil { - firstXY = xy - } - lbl := cp.getLabel() - clr := cp.Color - if leg != "" { - lbl = leg + " " + lbl - } - if nleg > 1 { - cidx := yidx*nleg + li - clr = colors.Uniform(colors.Spaced(cidx)) - } - if nidx > 1 { - clr = colors.Uniform(colors.Spaced(idx)) - lbl = fmt.Sprintf("%s_%02d", lbl, idx) - } - ec := -1 - if cp.ErrColumn != "" { - ec, _ = pl.table.Table.ColumnIndex(cp.ErrColumn) - } - var bar *plots.BarChart - if ec >= 0 { - exy, _ := newTableXY(lview, ec, 0, ec, 0, minmax.Range32{}) - bar, err = plots.NewBarChart(xy, exy) - if err != nil { - // log.Println(err) - continue - } - } else { - bar, err = plots.NewBarChart(xy, nil) - if err != nil { - // log.Println(err) - continue - } - } - bar.Color = clr - bar.Stride = float32(stride) - bar.Offset = float32(start) - bar.Width = pl.Options.BarWidth - plt.Add(bar) - plt.Legend.Add(lbl, bar) - start++ - } - } - yidx++ - yoff += nleg - } - mid := (stride - 1) / 2 - if stride > 1 { - mid = (stride - 2) / 2 - } - if firstXY != nil && len(strCols) > 0 { - firstXY.table = xview - n := xview.Len() - for _, cp := range strCols { - xy, _ := newTableXY(xview, xi, xp.TensorIndex, firstXY.yColumn, cp.TensorIndex, firstXY.yRange) - xy.labelColumn, _ = xview.Table.ColumnIndex(cp.Column) - xy.yIndex = firstXY.yIndex - - xyl := plots.XYLabels{} - xyl.XYs = make(plot.XYs, n) - xyl.Labels = make([]string, n) - - for i := range xview.Indexes { - y := firstXY.Value(i) - x := float32(mid + (i%maxx)*stride) - xyl.XYs[i] = math32.Vec2(x, y) - xyl.Labels[i] = xy.Label(i) - } - lbls, _ := plots.NewLabels(xyl) - if lbls != nil { - plt.Add(lbls) - } - } - } - - netn := pl.table.Len() * stride - xc := pl.table.Table.Columns[xi] - vals := make([]string, netn) - for i, dx := range pl.table.Indexes { - pi := mid + i*stride - if pi < netn && dx < xc.Len() { - vals[pi] = xc.String1D(dx) - } - } - plt.NominalX(vals...) - - pl.configPlot(plt) - pl.plot = plt -} diff --git a/plot/plotcore/enumgen.go b/plot/plotcore/enumgen.go deleted file mode 100644 index 3ea13f9f1c..0000000000 --- a/plot/plotcore/enumgen.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by "core generate"; DO NOT EDIT. 
- -package plotcore - -import ( - "cogentcore.org/core/enums" -) - -var _PlotTypesValues = []PlotTypes{0, 1} - -// PlotTypesN is the highest valid value for type PlotTypes, plus one. -const PlotTypesN PlotTypes = 2 - -var _PlotTypesValueMap = map[string]PlotTypes{`XY`: 0, `Bar`: 1} - -var _PlotTypesDescMap = map[PlotTypes]string{0: `XY is a standard line / point plot.`, 1: `Bar plots vertical bars.`} - -var _PlotTypesMap = map[PlotTypes]string{0: `XY`, 1: `Bar`} - -// String returns the string representation of this PlotTypes value. -func (i PlotTypes) String() string { return enums.String(i, _PlotTypesMap) } - -// SetString sets the PlotTypes value from its string representation, -// and returns an error if the string is invalid. -func (i *PlotTypes) SetString(s string) error { - return enums.SetString(i, s, _PlotTypesValueMap, "PlotTypes") -} - -// Int64 returns the PlotTypes value as an int64. -func (i PlotTypes) Int64() int64 { return int64(i) } - -// SetInt64 sets the PlotTypes value from an int64. -func (i *PlotTypes) SetInt64(in int64) { *i = PlotTypes(in) } - -// Desc returns the description of the PlotTypes value. -func (i PlotTypes) Desc() string { return enums.Desc(i, _PlotTypesDescMap) } - -// PlotTypesValues returns all possible values for the type PlotTypes. -func PlotTypesValues() []PlotTypes { return _PlotTypesValues } - -// Values returns all possible values for the type PlotTypes. -func (i PlotTypes) Values() []enums.Enum { return enums.Values(_PlotTypesValues) } - -// MarshalText implements the [encoding.TextMarshaler] interface. -func (i PlotTypes) MarshalText() ([]byte, error) { return []byte(i.String()), nil } - -// UnmarshalText implements the [encoding.TextUnmarshaler] interface. -func (i *PlotTypes) UnmarshalText(text []byte) error { - return enums.UnmarshalText(i, text, "PlotTypes") -} diff --git a/plot/plotcore/options.go b/plot/plotcore/options.go deleted file mode 100644 index 5b91e3d865..0000000000 --- a/plot/plotcore/options.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plotcore - -import ( - "image" - "strings" - - "cogentcore.org/core/base/option" - "cogentcore.org/core/base/reflectx" - "cogentcore.org/core/math32/minmax" - "cogentcore.org/core/plot" - "cogentcore.org/core/plot/plots" - "cogentcore.org/core/tensor/table" -) - -// PlotOptions are options for the overall plot. -type PlotOptions struct { //types:add - - // optional title at top of plot - Title string - - // type of plot to generate. For a Bar plot, items are plotted ordinally by row and the XAxis is optional - Type PlotTypes - - // whether to plot lines - Lines bool `default:"true"` - - // whether to plot points with symbols - Points bool - - // width of lines - LineWidth float32 `default:"1"` - - // size of points - PointSize float32 `default:"3"` - - // the shape used to draw points - PointShape plots.Shapes - - // width of bars for bar plot, as fraction of available space (1 = no gaps) - BarWidth float32 `min:"0.01" max:"1" default:"0.8"` - - // if true, draw lines that connect points with a negative X-axis direction; - // otherwise there is a break in the line. - // default is false, so that repeated series of data across the X axis - // are plotted separately. - NegativeXDraw bool - - // Scale multiplies the plot DPI value, to change the overall scale - // of the rendered plot. Larger numbers produce larger scaling. 
- // Typically use larger numbers when generating plots for inclusion in - // documents or other cases where the overall plot size will be small. - Scale float32 `default:"1,2"` - - // what column to use for the common X axis. if empty or not found, - // the row number is used. This optional for Bar plots, if present and - // Legend is also present, then an extra space will be put between X values. - XAxis string - - // optional column for adding a separate colored / styled line or bar - // according to this value, and acts just like a separate Y variable, - // crossed with Y variables. - Legend string - - // position of the Legend - LegendPosition plot.LegendPosition `display:"inline"` - - // rotation of the X Axis labels, in degrees - XAxisRotation float32 - - // optional label to use for XAxis instead of column name - XAxisLabel string - - // optional label to use for YAxis -- if empty, first column name is used - YAxisLabel string -} - -// defaults sets defaults if unset values are present. -func (po *PlotOptions) defaults() { - if po.LineWidth == 0 { - po.LineWidth = 1 - po.Lines = true - po.Points = false - po.PointSize = 3 - po.BarWidth = .8 - po.LegendPosition.Defaults() - } - if po.Scale == 0 { - po.Scale = 1 - } -} - -// fromMeta sets plot options from meta data. -func (po *PlotOptions) fromMeta(dt *table.Table) { - po.FromMetaMap(dt.MetaData) -} - -// metaMapLower tries meta data access by lower-case version of key too -func metaMapLower(meta map[string]string, key string) (string, bool) { - vl, has := meta[key] - if has { - return vl, has - } - vl, has = meta[strings.ToLower(key)] - return vl, has -} - -// FromMetaMap sets plot options from meta data map. -func (po *PlotOptions) FromMetaMap(meta map[string]string) { - if typ, has := metaMapLower(meta, "Type"); has { - po.Type.SetString(typ) - } - if op, has := metaMapLower(meta, "Lines"); has { - if op == "+" || op == "true" { - po.Lines = true - } else { - po.Lines = false - } - } - if op, has := metaMapLower(meta, "Points"); has { - if op == "+" || op == "true" { - po.Points = true - } else { - po.Points = false - } - } - if lw, has := metaMapLower(meta, "LineWidth"); has { - po.LineWidth, _ = reflectx.ToFloat32(lw) - } - if ps, has := metaMapLower(meta, "PointSize"); has { - po.PointSize, _ = reflectx.ToFloat32(ps) - } - if bw, has := metaMapLower(meta, "BarWidth"); has { - po.BarWidth, _ = reflectx.ToFloat32(bw) - } - if op, has := metaMapLower(meta, "NegativeXDraw"); has { - if op == "+" || op == "true" { - po.NegativeXDraw = true - } else { - po.NegativeXDraw = false - } - } - if scl, has := metaMapLower(meta, "Scale"); has { - po.Scale, _ = reflectx.ToFloat32(scl) - } - if xc, has := metaMapLower(meta, "XAxis"); has { - po.XAxis = xc - } - if lc, has := metaMapLower(meta, "Legend"); has { - po.Legend = lc - } - if xrot, has := metaMapLower(meta, "XAxisRotation"); has { - po.XAxisRotation, _ = reflectx.ToFloat32(xrot) - } - if lb, has := metaMapLower(meta, "XAxisLabel"); has { - po.XAxisLabel = lb - } - if lb, has := metaMapLower(meta, "YAxisLabel"); has { - po.YAxisLabel = lb - } -} - -// ColumnOptions are options for plotting one column of data. 
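// Editor's note: a hedged sketch (not part of the original file). Besides being
// set directly, these per-column options can come from table meta data keyed by
// column name, as handled by fromMetaMap further below. For a hypothetical
// column named "Value", and assuming dt.MetaData is an initialized map:
//
//    dt.MetaData["Value:On"] = "+"
//    dt.MetaData["Value:FixMin"] = "true"
//    dt.MetaData["Value:Max"] = "100"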
-type ColumnOptions struct { //types:add - - // whether to plot this column - On bool - - // name of column being plotting - Column string - - // whether to plot lines; uses the overall plot option if unset - Lines option.Option[bool] - - // whether to plot points with symbols; uses the overall plot option if unset - Points option.Option[bool] - - // the width of lines; uses the overall plot option if unset - LineWidth option.Option[float32] - - // the size of points; uses the overall plot option if unset - PointSize option.Option[float32] - - // the shape used to draw points; uses the overall plot option if unset - PointShape option.Option[plots.Shapes] - - // effective range of data to plot -- either end can be fixed - Range minmax.Range32 `display:"inline"` - - // full actual range of data -- only valid if specifically computed - FullRange minmax.F32 `display:"inline"` - - // color to use when plotting the line / column - Color image.Image - - // desired number of ticks - NTicks int - - // if specified, this is an alternative label to use when plotting - Label string - - // if column has n-dimensional tensor cells in each row, this is the index within each cell to plot -- use -1 to plot *all* indexes as separate lines - TensorIndex int - - // specifies a column containing error bars for this column - ErrColumn string - - // if true this is a string column -- plots as labels - IsString bool `edit:"-"` -} - -// defaults sets defaults if unset values are present. -func (co *ColumnOptions) defaults() { - if co.NTicks == 0 { - co.NTicks = 10 - } -} - -// getLabel returns the effective label of the column. -func (co *ColumnOptions) getLabel() string { - if co.Label != "" { - return co.Label - } - return co.Column -} - -// fromMetaMap sets column options from meta data map. -func (co *ColumnOptions) fromMetaMap(meta map[string]string) { - if op, has := metaMapLower(meta, co.Column+":On"); has { - if op == "+" || op == "true" || op == "" { - co.On = true - } else { - co.On = false - } - } - if op, has := metaMapLower(meta, co.Column+":Off"); has { - if op == "+" || op == "true" || op == "" { - co.On = false - } else { - co.On = true - } - } - if op, has := metaMapLower(meta, co.Column+":FixMin"); has { - if op == "+" || op == "true" { - co.Range.FixMin = true - } else { - co.Range.FixMin = false - } - } - if op, has := metaMapLower(meta, co.Column+":FixMax"); has { - if op == "+" || op == "true" { - co.Range.FixMax = true - } else { - co.Range.FixMax = false - } - } - if op, has := metaMapLower(meta, co.Column+":FloatMin"); has { - if op == "+" || op == "true" { - co.Range.FixMin = false - } else { - co.Range.FixMin = true - } - } - if op, has := metaMapLower(meta, co.Column+":FloatMax"); has { - if op == "+" || op == "true" { - co.Range.FixMax = false - } else { - co.Range.FixMax = true - } - } - if vl, has := metaMapLower(meta, co.Column+":Max"); has { - co.Range.Max, _ = reflectx.ToFloat32(vl) - } - if vl, has := metaMapLower(meta, co.Column+":Min"); has { - co.Range.Min, _ = reflectx.ToFloat32(vl) - } - if lb, has := metaMapLower(meta, co.Column+":Label"); has { - co.Label = lb - } - if lb, has := metaMapLower(meta, co.Column+":ErrColumn"); has { - co.ErrColumn = lb - } - if vl, has := metaMapLower(meta, co.Column+":TensorIndex"); has { - iv, _ := reflectx.ToInt(vl) - co.TensorIndex = int(iv) - } -} - -// PlotTypes are different types of plots. -type PlotTypes int32 //enums:enum - -const ( - // XY is a standard line / point plot. - XY PlotTypes = iota - - // Bar plots vertical bars. 
- Bar -) diff --git a/plot/plotcore/plot.go b/plot/plotcore/plot.go deleted file mode 100644 index 61b3e27fb2..0000000000 --- a/plot/plotcore/plot.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plotcore - -import ( - "fmt" - "image" - "image/draw" - - "cogentcore.org/core/colors" - "cogentcore.org/core/core" - "cogentcore.org/core/cursors" - "cogentcore.org/core/events" - "cogentcore.org/core/plot" - "cogentcore.org/core/styles" - "cogentcore.org/core/styles/abilities" - "cogentcore.org/core/styles/states" - "cogentcore.org/core/styles/units" -) - -// Plot is a widget that renders a [plot.Plot] object. -// If it is not [states.ReadOnly], the user can pan and zoom the graph. -// See [PlotEditor] for an interactive interface for selecting columns to view. -type Plot struct { - core.WidgetBase - - // Scale multiplies the plot DPI value, to change the overall scale - // of the rendered plot. Larger numbers produce larger scaling. - // Typically use larger numbers when generating plots for inclusion in - // documents or other cases where the overall plot size will be small. - Scale float32 - - // Plot is the Plot to display in this widget - Plot *plot.Plot `set:"-"` - - // SetRangesFunc, if set, is called to adjust the data ranges - // after the point when these ranges are updated based on the plot data. - SetRangesFunc func() -} - -// SetPlot sets the plot to given Plot, and calls UpdatePlot to ensure it is -// drawn at the current size of this widget -func (pt *Plot) SetPlot(pl *plot.Plot) { - if pl != nil && pt.Plot != nil && pt.Plot.Pixels != nil { - pl.DPI = pt.Scale * pt.Styles.UnitContext.DPI - pl.SetPixels(pt.Plot.Pixels) // re-use the image! - } - pt.Plot = pl - pt.updatePlot() -} - -// updatePlot draws the current plot at the size of the current widget, -// and triggers a Render so the widget will be rendered. 
-func (pt *Plot) updatePlot() { - if pt.Plot == nil { - pt.NeedsRender() - return - } - sz := pt.Geom.Size.Actual.Content.ToPoint() - if sz == (image.Point{}) { - return - } - pt.Plot.DPI = pt.Scale * pt.Styles.UnitContext.DPI - pt.Plot.Resize(sz) - if pt.SetRangesFunc != nil { - pt.SetRangesFunc() - } - pt.Plot.Draw() - pt.NeedsRender() -} - -func (pt *Plot) Init() { - pt.WidgetBase.Init() - pt.Scale = 1 - pt.Styler(func(s *styles.Style) { - s.Min.Set(units.Dp(256)) - ro := pt.IsReadOnly() - s.SetAbilities(!ro, abilities.Slideable, abilities.Activatable, abilities.Scrollable) - if !ro { - if s.Is(states.Active) { - s.Cursor = cursors.Grabbing - s.StateLayer = 0 - } else { - s.Cursor = cursors.Grab - } - } - }) - - pt.On(events.SlideMove, func(e events.Event) { - e.SetHandled() - if pt.Plot == nil { - return - } - del := e.PrevDelta() - dx := -float32(del.X) * (pt.Plot.X.Max - pt.Plot.X.Min) * 0.0008 - dy := float32(del.Y) * (pt.Plot.Y.Max - pt.Plot.Y.Min) * 0.0008 - pt.Plot.X.Min += dx - pt.Plot.X.Max += dx - pt.Plot.Y.Min += dy - pt.Plot.Y.Max += dy - pt.updatePlot() - pt.NeedsRender() - }) - - pt.On(events.Scroll, func(e events.Event) { - e.SetHandled() - if pt.Plot == nil { - return - } - se := e.(*events.MouseScroll) - sc := 1 + (float32(se.Delta.Y) * 0.002) - pt.Plot.X.Min *= sc - pt.Plot.X.Max *= sc - pt.Plot.Y.Min *= sc - pt.Plot.Y.Max *= sc - pt.updatePlot() - pt.NeedsRender() - }) -} - -func (pt *Plot) WidgetTooltip(pos image.Point) (string, image.Point) { - if pos == image.Pt(-1, -1) { - return "_", image.Point{} - } - if pt.Plot == nil { - return pt.Tooltip, pt.DefaultTooltipPos() - } - wpos := pos.Sub(pt.Geom.ContentBBox.Min) - _, idx, dist, data, _, legend := pt.Plot.ClosestDataToPixel(wpos.X, wpos.Y) - if dist <= 10 { - return fmt.Sprintf("%s[%d]: (%g, %g)", legend, idx, data.X, data.Y), pos - } - return pt.Tooltip, pt.DefaultTooltipPos() -} - -func (pt *Plot) SizeFinal() { - pt.WidgetBase.SizeFinal() - pt.updatePlot() -} - -func (pt *Plot) Render() { - pt.WidgetBase.Render() - - r := pt.Geom.ContentBBox - sp := pt.Geom.ScrollOffset() - if pt.Plot == nil || pt.Plot.Pixels == nil { - draw.Draw(pt.Scene.Pixels, r, colors.Scheme.Surface, sp, draw.Src) - return - } - draw.Draw(pt.Scene.Pixels, r, pt.Plot.Pixels, sp, draw.Src) -} diff --git a/plot/plotcore/ploteditor.go b/plot/plotcore/ploteditor.go deleted file mode 100644 index f0916e8952..0000000000 --- a/plot/plotcore/ploteditor.go +++ /dev/null @@ -1,668 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package plotcore provides Cogent Core widgets for viewing and editing plots. -package plotcore - -//go:generate core generate - -import ( - "io/fs" - "log/slog" - "path/filepath" - "reflect" - "strings" - "time" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/base/iox/imagex" - "cogentcore.org/core/colors" - "cogentcore.org/core/core" - "cogentcore.org/core/events" - "cogentcore.org/core/icons" - "cogentcore.org/core/math32" - "cogentcore.org/core/plot" - "cogentcore.org/core/styles" - "cogentcore.org/core/styles/states" - "cogentcore.org/core/system" - "cogentcore.org/core/tensor/table" - "cogentcore.org/core/tensor/tensorcore" - "cogentcore.org/core/tree" -) - -// PlotEditor is a widget that provides an interactive 2D plot -// of selected columns of tabular data, represented by a [table.IndexView] into -// a [table.Table]. Other types of tabular data can be converted into this format. 
-// The user can change various options for the plot and also modify the underlying data. -type PlotEditor struct { //types:add - core.Frame - - // table is the table of data being plotted. - table *table.IndexView - - // Options are the overall plot options. - Options PlotOptions - - // Columns are the options for each column of the table. - Columns []*ColumnOptions `set:"-"` - - // plot is the plot object. - plot *plot.Plot - - // current svg file - svgFile core.Filename - - // current csv data file - dataFile core.Filename - - // currently doing a plot - inPlot bool - - columnsFrame *core.Frame - plotWidget *Plot -} - -func (pl *PlotEditor) CopyFieldsFrom(frm tree.Node) { - fr := frm.(*PlotEditor) - pl.Frame.CopyFieldsFrom(&fr.Frame) - pl.Options = fr.Options - pl.setIndexView(fr.table) - mx := min(len(pl.Columns), len(fr.Columns)) - for i := 0; i < mx; i++ { - *pl.Columns[i] = *fr.Columns[i] - } -} - -// NewSubPlot returns a [PlotEditor] with its own separate [core.Toolbar], -// suitable for a tab or other element that is not the main plot. -func NewSubPlot(parent ...tree.Node) *PlotEditor { - fr := core.NewFrame(parent...) - tb := core.NewToolbar(fr) - pl := NewPlotEditor(fr) - fr.Styler(func(s *styles.Style) { - s.Direction = styles.Column - s.Grow.Set(1, 1) - }) - tb.Maker(pl.MakeToolbar) - return pl -} - -func (pl *PlotEditor) Init() { - pl.Frame.Init() - - pl.Options.defaults() - pl.Styler(func(s *styles.Style) { - s.Grow.Set(1, 1) - if pl.SizeClass() == core.SizeCompact { - s.Direction = styles.Column - } - }) - - pl.OnShow(func(e events.Event) { - pl.UpdatePlot() - }) - - pl.Updater(func() { - if pl.table != nil && pl.table.Table != nil { - pl.Options.fromMeta(pl.table.Table) - } - }) - tree.AddChildAt(pl, "columns", func(w *core.Frame) { - pl.columnsFrame = w - w.Styler(func(s *styles.Style) { - s.Direction = styles.Column - s.Background = colors.Scheme.SurfaceContainerLow - if w.SizeClass() == core.SizeCompact { - s.Grow.Set(1, 0) - } else { - s.Grow.Set(0, 1) - s.Overflow.Y = styles.OverflowAuto - } - }) - w.Maker(pl.makeColumns) - }) - tree.AddChildAt(pl, "plot", func(w *Plot) { - pl.plotWidget = w - w.Plot = pl.plot - w.Styler(func(s *styles.Style) { - s.Grow.Set(1, 1) - }) - }) -} - -// setIndexView sets the table to view and does Update -// to update the Column list, which will also trigger a Layout -// and updating of the plot on next render pass. -// This is safe to call from a different goroutine. -func (pl *PlotEditor) setIndexView(tab *table.IndexView) *PlotEditor { - pl.table = tab - pl.Update() - return pl -} - -// SetTable sets the table to view and does Update -// to update the Column list, which will also trigger a Layout -// and updating of the plot on next render pass. -// This is safe to call from a different goroutine. -func (pl *PlotEditor) SetTable(tab *table.Table) *PlotEditor { - pl.table = table.NewIndexView(tab) - pl.Update() - return pl -} - -// SetSlice sets the table to a [table.NewSliceTable] -// from the given slice. -func (pl *PlotEditor) SetSlice(sl any) *PlotEditor { - return pl.SetTable(errors.Log1(table.NewSliceTable(sl))) -} - -// ColumnOptions returns the current column options by name -// (to access by index, just use Columns directly). -func (pl *PlotEditor) ColumnOptions(column string) *ColumnOptions { - for _, co := range pl.Columns { - if co.Column == column { - return co - } - } - return nil -} - -// Bool constants for [PlotEditor.SetColumnOptions]. 
-const ( - On = true - Off = false - FixMin = true - FloatMin = false - FixMax = true - FloatMax = false -) - -// SetColumnOptions sets the main parameters for one column. -func (pl *PlotEditor) SetColumnOptions(column string, on bool, fixMin bool, min float32, fixMax bool, max float32) *ColumnOptions { - co := pl.ColumnOptions(column) - if co == nil { - slog.Error("plotcore.PlotEditor.SetColumnOptions: column not found", "column", column) - return nil - } - co.On = on - co.Range.FixMin = fixMin - if fixMin { - co.Range.Min = min - } - co.Range.FixMax = fixMax - if fixMax { - co.Range.Max = max - } - return co -} - -// SaveSVG saves the plot to an svg -- first updates to ensure that plot is current -func (pl *PlotEditor) SaveSVG(fname core.Filename) { //types:add - pl.UpdatePlot() - // TODO: get plot svg saving working - // pc := pl.PlotChild() - // SaveSVGView(string(fname), pl.Plot, sv, 2) - pl.svgFile = fname -} - -// SavePNG saves the current plot to a png, capturing current render -func (pl *PlotEditor) SavePNG(fname core.Filename) { //types:add - pl.UpdatePlot() - imagex.Save(pl.plot.Pixels, string(fname)) -} - -// SaveCSV saves the Table data to a csv (comma-separated values) file with headers (any delim) -func (pl *PlotEditor) SaveCSV(fname core.Filename, delim table.Delims) { //types:add - pl.table.SaveCSV(fname, delim, table.Headers) - pl.dataFile = fname -} - -// SaveAll saves the current plot to a png, svg, and the data to a tsv -- full save -// Any extension is removed and appropriate extensions are added -func (pl *PlotEditor) SaveAll(fname core.Filename) { //types:add - fn := string(fname) - fn = strings.TrimSuffix(fn, filepath.Ext(fn)) - pl.SaveCSV(core.Filename(fn+".tsv"), table.Tab) - pl.SavePNG(core.Filename(fn + ".png")) - pl.SaveSVG(core.Filename(fn + ".svg")) -} - -// OpenCSV opens the Table data from a csv (comma-separated values) file (or any delim) -func (pl *PlotEditor) OpenCSV(filename core.Filename, delim table.Delims) { //types:add - pl.table.Table.OpenCSV(filename, delim) - pl.dataFile = filename - pl.UpdatePlot() -} - -// OpenFS opens the Table data from a csv (comma-separated values) file (or any delim) -// from the given filesystem. -func (pl *PlotEditor) OpenFS(fsys fs.FS, filename core.Filename, delim table.Delims) { - pl.table.Table.OpenFS(fsys, string(filename), delim) - pl.dataFile = filename - pl.UpdatePlot() -} - -// yLabel returns the Y-axis label -func (pl *PlotEditor) yLabel() string { - if pl.Options.YAxisLabel != "" { - return pl.Options.YAxisLabel - } - for _, cp := range pl.Columns { - if cp.On { - return cp.getLabel() - } - } - return "Y" -} - -// xLabel returns the X-axis label -func (pl *PlotEditor) xLabel() string { - if pl.Options.XAxisLabel != "" { - return pl.Options.XAxisLabel - } - if pl.Options.XAxis != "" { - cp := pl.ColumnOptions(pl.Options.XAxis) - if cp != nil { - return cp.getLabel() - } - return pl.Options.XAxis - } - return "X" -} - -// GoUpdatePlot updates the display based on current IndexView into table. -// This version can be called from goroutines. It does Sequential() on -// the [table.IndexView], under the assumption that it is used for tracking -// the latest updates of a running process. -func (pl *PlotEditor) GoUpdatePlot() { - if pl == nil || pl.This == nil { - return - } - if core.TheApp.Platform() == system.Web { - time.Sleep(time.Millisecond) // critical to prevent hanging!
- } - if !pl.IsVisible() || pl.table == nil || pl.table.Table == nil || pl.inPlot { - return - } - pl.Scene.AsyncLock() - pl.table.Sequential() - pl.genPlot() - pl.NeedsRender() - pl.Scene.AsyncUnlock() -} - -// UpdatePlot updates the display based on current IndexView into table. -// It does not automatically update the [table.IndexView] unless it is -// nil or out of date. -func (pl *PlotEditor) UpdatePlot() { - if pl == nil || pl.This == nil { - return - } - if pl.table == nil || pl.table.Table == nil || pl.inPlot { - return - } - if len(pl.Children) != 2 || len(pl.Columns) != pl.table.Table.NumColumns() { - pl.Update() - } - if pl.table.Len() == 0 { - pl.table.Sequential() - } - pl.genPlot() -} - -// genPlot generates the plot and renders it in the plot widget. -// It surrounds the operation with inPlot true / false to prevent multiple updates. -func (pl *PlotEditor) genPlot() { - if pl.inPlot { - slog.Error("plot: in plot already") // note: this never seems to happen -- could probably nuke - return - } - pl.inPlot = true - if pl.table == nil { - pl.inPlot = false - return - } - if len(pl.table.Indexes) == 0 { - pl.table.Sequential() - } else { - lsti := pl.table.Indexes[pl.table.Len()-1] - if lsti >= pl.table.Table.Rows { // out of date - pl.table.Sequential() - } - } - pl.plot = nil - switch pl.Options.Type { - case XY: - pl.genPlotXY() - case Bar: - pl.genPlotBar() - } - pl.plotWidget.Scale = pl.Options.Scale - pl.plotWidget.SetRangesFunc = func() { - plt := pl.plotWidget.Plot - xi, err := pl.table.Table.ColumnIndex(pl.Options.XAxis) - if err == nil { - xp := pl.Columns[xi] - if xp.Range.FixMin { - plt.X.Min = math32.Min(plt.X.Min, float32(xp.Range.Min)) - } - if xp.Range.FixMax { - plt.X.Max = math32.Max(plt.X.Max, float32(xp.Range.Max)) - } - } - for _, cp := range pl.Columns { // key that this comes at the end, to actually stick - if !cp.On || cp.IsString { - continue - } - if cp.Range.FixMin { - plt.Y.Min = math32.Min(plt.Y.Min, float32(cp.Range.Min)) - } - if cp.Range.FixMax { - plt.Y.Max = math32.Max(plt.Y.Max, float32(cp.Range.Max)) - } - } - } - pl.plotWidget.SetPlot(pl.plot) // redraws etc - pl.inPlot = false -} - -// configPlot configures the given plot based on the plot options.
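// For orientation, these are the same options a caller sets before updating;
// a sketch drawn from the test file later in this diff (editor's illustration,
// not from the original file):
//
//	pl.Options.Title = "RA25 Epoch Train"
//	pl.Options.XAxis = "Epoch"
//	pl.Options.Points = true
//	pl.UpdatePlot() // regenerates the plot, which ends up calling configPlot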
-func (pl *PlotEditor) configPlot(plt *plot.Plot) { - plt.Title.Text = pl.Options.Title - plt.X.Label.Text = pl.xLabel() - plt.Y.Label.Text = pl.yLabel() - plt.Legend.Position = pl.Options.LegendPosition - plt.X.TickText.Style.Rotation = float32(pl.Options.XAxisRotation) -} - -// plotXAxis processes the XAxis and returns its index -func (pl *PlotEditor) plotXAxis(plt *plot.Plot, ixvw *table.IndexView) (xi int, xview *table.IndexView, err error) { - xi, err = ixvw.Table.ColumnIndex(pl.Options.XAxis) - if err != nil { - // log.Println("plot.PlotXAxis: " + err.Error()) - return - } - xview = ixvw - xc := ixvw.Table.Columns[xi] - xp := pl.Columns[xi] - sz := 1 - if xp.Range.FixMin { - plt.X.Min = math32.Min(plt.X.Min, float32(xp.Range.Min)) - } - if xp.Range.FixMax { - plt.X.Max = math32.Max(plt.X.Max, float32(xp.Range.Max)) - } - if xc.NumDims() > 1 { - sz = xc.Len() / xc.DimSize(0) - if xp.TensorIndex > sz || xp.TensorIndex < 0 { - slog.Error("plotcore.PlotEditor.plotXAxis: TensorIndex invalid -- reset to 0") - xp.TensorIndex = 0 - } - } - return -} - -const plotColumnsHeaderN = 2 - -// columnsListUpdate updates the list of columns -func (pl *PlotEditor) columnsListUpdate() { - if pl.table == nil || pl.table.Table == nil { - pl.Columns = nil - return - } - dt := pl.table.Table - nc := dt.NumColumns() - if nc == len(pl.Columns) { - return - } - pl.Columns = make([]*ColumnOptions, nc) - clri := 0 - hasOn := false - for ci := range dt.NumColumns() { - cn := dt.ColumnName(ci) - if pl.Options.XAxis == "" && ci == 0 { - pl.Options.XAxis = cn // x-axis defaults to the first column - } - cp := &ColumnOptions{Column: cn} - cp.defaults() - tcol := dt.Columns[ci] - if tcol.IsString() { - cp.IsString = true - } else { - cp.IsString = false - // we enable the first non-string, non-x-axis, non-first column by default - if !hasOn && cn != pl.Options.XAxis && ci != 0 { - cp.On = true - hasOn = true - } - } - cp.fromMetaMap(pl.table.Table.MetaData) - inc := 1 - if cn == pl.Options.XAxis || tcol.IsString() || tcol.DataType() == reflect.Int || tcol.DataType() == reflect.Int64 || tcol.DataType() == reflect.Int32 || tcol.DataType() == reflect.Uint8 { - inc = 0 - } - cp.Color = colors.Uniform(colors.Spaced(clri)) - pl.Columns[ci] = cp - clri += inc - } -} - -// ColumnsFromMetaMap updates all the column settings from given meta map -func (pl *PlotEditor) ColumnsFromMetaMap(meta map[string]string) { - for _, cp := range pl.Columns { - cp.fromMetaMap(meta) - } -} - -// setAllColumns turns all Columns on or off (except X axis) -func (pl *PlotEditor) setAllColumns(on bool) { - fr := pl.columnsFrame - for i, cli := range fr.Children { - if i < plotColumnsHeaderN { - continue - } - ci := i - plotColumnsHeaderN - cp := pl.Columns[ci] - if cp.Column == pl.Options.XAxis { - continue - } - cp.On = on - cl := cli.(*core.Frame) - sw := cl.Child(0).(*core.Switch) - sw.SetChecked(cp.On) - } - pl.UpdatePlot() - pl.NeedsRender() -} - -// setColumnsByName turns columns on or off if their name contains -// the given string. 
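// For example (hypothetical call, editor's illustration only):
//
//	pl.setColumnsByName("Err", true) // turn on every column whose name contains "Err"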
-func (pl *PlotEditor) setColumnsByName(nameContains string, on bool) { //types:add - fr := pl.columnsFrame - for i, cli := range fr.Children { - if i < plotColumnsHeaderN { - continue - } - ci := i - plotColumnsHeaderN - cp := pl.Columns[ci] - if cp.Column == pl.Options.XAxis { - continue - } - if !strings.Contains(cp.Column, nameContains) { - continue - } - cp.On = on - cl := cli.(*core.Frame) - sw := cl.Child(0).(*core.Switch) - sw.SetChecked(cp.On) - } - pl.UpdatePlot() - pl.NeedsRender() -} - -// makeColumns makes the Plans for columns -func (pl *PlotEditor) makeColumns(p *tree.Plan) { - pl.columnsListUpdate() - tree.Add(p, func(w *core.Frame) { - tree.AddChild(w, func(w *core.Button) { - w.SetText("Clear").SetIcon(icons.ClearAll).SetType(core.ButtonAction) - w.SetTooltip("Turn all columns off") - w.OnClick(func(e events.Event) { - pl.setAllColumns(false) - }) - }) - tree.AddChild(w, func(w *core.Button) { - w.SetText("Search").SetIcon(icons.Search).SetType(core.ButtonAction) - w.SetTooltip("Select columns by column name") - w.OnClick(func(e events.Event) { - core.CallFunc(pl, pl.setColumnsByName) - }) - }) - }) - tree.Add(p, func(w *core.Separator) {}) - for _, cp := range pl.Columns { - tree.AddAt(p, cp.Column, func(w *core.Frame) { - w.Styler(func(s *styles.Style) { - s.CenterAll() - }) - tree.AddChild(w, func(w *core.Switch) { - w.SetType(core.SwitchCheckbox).SetTooltip("Turn this column on or off") - w.OnChange(func(e events.Event) { - cp.On = w.IsChecked() - pl.UpdatePlot() - }) - w.Updater(func() { - xaxis := cp.Column == pl.Options.XAxis || cp.Column == pl.Options.Legend - w.SetState(xaxis, states.Disabled, states.Indeterminate) - if xaxis { - cp.On = false - } else { - w.SetChecked(cp.On) - } - }) - }) - tree.AddChild(w, func(w *core.Button) { - w.SetText(cp.Column).SetType(core.ButtonAction).SetTooltip("Edit column options including setting it as the x-axis or legend") - w.OnClick(func(e events.Event) { - update := func() { - if core.TheApp.Platform().IsMobile() { - pl.Update() - return - } - // we must be async on multi-window platforms since - // it is coming from a separate window - pl.AsyncLock() - pl.Update() - pl.AsyncUnlock() - } - d := core.NewBody("Column options") - core.NewForm(d).SetStruct(cp). - OnChange(func(e events.Event) { - update() - }) - d.AddTopBar(func(bar *core.Frame) { - core.NewToolbar(bar).Maker(func(p *tree.Plan) { - tree.Add(p, func(w *core.Button) { - w.SetText("Set x-axis").OnClick(func(e events.Event) { - pl.Options.XAxis = cp.Column - update() - }) - }) - tree.Add(p, func(w *core.Button) { - w.SetText("Set legend").OnClick(func(e events.Event) { - pl.Options.Legend = cp.Column - update() - }) - }) - }) - }) - d.RunWindowDialog(pl) - }) - }) - }) - } -} - -func (pl *PlotEditor) MakeToolbar(p *tree.Plan) { - if pl.table == nil { - return - } - tree.Add(p, func(w *core.Button) { - w.SetIcon(icons.PanTool). - SetTooltip("toggle the ability to zoom and pan the view").OnClick(func(e events.Event) { - pw := pl.plotWidget - pw.SetReadOnly(!pw.IsReadOnly()) - pw.Restyle() - }) - }) - // tree.Add(p, func(w *core.Button) { - // w.SetIcon(icons.ArrowForward). - // SetTooltip("turn on select mode for selecting Plot elements"). - // OnClick(func(e events.Event) { - // fmt.Println("this will select select mode") - // }) - // }) - tree.Add(p, func(w *core.Separator) {}) - - tree.Add(p, func(w *core.Button) { - w.SetText("Update").SetIcon(icons.Update). - SetTooltip("update fully redraws display, reflecting any new settings etc"). 
- OnClick(func(e events.Event) { - pl.UpdateWidget() - pl.UpdatePlot() - }) - }) - tree.Add(p, func(w *core.Button) { - w.SetText("Options").SetIcon(icons.Settings). - SetTooltip("Options for how the plot is rendered"). - OnClick(func(e events.Event) { - d := core.NewBody("Plot options") - core.NewForm(d).SetStruct(&pl.Options). - OnChange(func(e events.Event) { - pl.GoUpdatePlot() - }) - d.RunWindowDialog(pl) - }) - }) - tree.Add(p, func(w *core.Button) { - w.SetText("Table").SetIcon(icons.Edit). - SetTooltip("open a Table window of the data"). - OnClick(func(e events.Event) { - d := core.NewBody(pl.Name + " Data") - tv := tensorcore.NewTable(d).SetTable(pl.table.Table) - d.AddTopBar(func(bar *core.Frame) { - core.NewToolbar(bar).Maker(tv.MakeToolbar) - }) - d.RunWindowDialog(pl) - }) - }) - tree.Add(p, func(w *core.Separator) {}) - - tree.Add(p, func(w *core.Button) { - w.SetText("Save").SetIcon(icons.Save).SetMenu(func(m *core.Scene) { - core.NewFuncButton(m).SetFunc(pl.SaveSVG).SetIcon(icons.Save) - core.NewFuncButton(m).SetFunc(pl.SavePNG).SetIcon(icons.Save) - core.NewFuncButton(m).SetFunc(pl.SaveCSV).SetIcon(icons.Save) - core.NewSeparator(m) - core.NewFuncButton(m).SetFunc(pl.SaveAll).SetIcon(icons.Save) - }) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(pl.OpenCSV).SetIcon(icons.Open) - }) - tree.Add(p, func(w *core.Separator) {}) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(pl.table.FilterColumnName).SetText("Filter").SetIcon(icons.FilterAlt) - w.SetAfterFunc(pl.UpdatePlot) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(pl.table.Sequential).SetText("Unfilter").SetIcon(icons.FilterAltOff) - w.SetAfterFunc(pl.UpdatePlot) - }) -} - -func (pt *PlotEditor) SizeFinal() { - pt.Frame.SizeFinal() - pt.UpdatePlot() -} diff --git a/plot/plotcore/ploteditor_test.go b/plot/plotcore/ploteditor_test.go deleted file mode 100644 index 2ecaa13bd8..0000000000 --- a/plot/plotcore/ploteditor_test.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plotcore - -import ( - "testing" - - "cogentcore.org/core/core" - "cogentcore.org/core/tensor/table" -) - -type Data struct { - City string - Population float32 - Area float32 -} - -func TestTablePlotEditor(t *testing.T) { - b := core.NewBody() - - epc := table.NewTable("epc") - epc.OpenCSV("testdata/ra25epoch.tsv", table.Tab) - - pl := NewPlotEditor(b) - pl.Options.Title = "RA25 Epoch Train" - pl.Options.XAxis = "Epoch" - // pl.Options.Scale = 2 - pl.Options.Points = true - pl.SetTable(epc) - pl.ColumnOptions("UnitErr").On = true - b.AddTopBar(func(bar *core.Frame) { - core.NewToolbar(bar).Maker(pl.MakeToolbar) - }) - b.AssertRender(t, "table") -} - -func TestSlicePlotEditor(t *testing.T) { - t.Skip("TODO: this test randomly hangs on CI") - data := []Data{ - {"Davis", 62000, 500}, - {"Boulder", 85000, 800}, - } - - b := core.NewBody() - - pl := NewPlotEditor(b) - pl.Options.Title = "Slice Data" - pl.Options.Points = true - pl.SetSlice(data) - b.AddTopBar(func(bar *core.Frame) { - core.NewToolbar(bar).Maker(pl.MakeToolbar) - }) - - b.AssertRender(t, "slice") -} diff --git a/plot/plotcore/tablexy.go b/plot/plotcore/tablexy.go deleted file mode 100644 index 93d22fdc3e..0000000000 --- a/plot/plotcore/tablexy.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plotcore - -import ( - "cogentcore.org/core/base/errors" - "cogentcore.org/core/math32" - "cogentcore.org/core/math32/minmax" - "cogentcore.org/core/plot" - "cogentcore.org/core/plot/plots" - "cogentcore.org/core/tensor/table" -) - -// tableXY selects two columns from a [table.Table] data table to plot in a [plot.Plot], -// satisfying the [plot.XYer], [plot.Valuer], [plot.Labeler], and [plots.YErrorer] interfaces. -// For Tensor-valued cells, the Index fields specify the tensor cell to use. -// Also satisfies the plot/plots.Labeler interface for labels attached to a line, and -// plot/plots.YErrorer for error bars. -type tableXY struct { - - // the index view of data table to plot from - table *table.IndexView - - // the indexes of the tensor columns to use for the X and Y data, respectively - xColumn, yColumn int - - // number of elements in each row of data -- 1 for scalar, > 1 for multi-dimensional - xRowSize, yRowSize int - - // the indexes of the element within each tensor cell if cells are n-dimensional, respectively - xIndex, yIndex int - - // the column to use for returning a label using Label interface -- for string cols - labelColumn int - - // the column to use for returning error bars (+/- given value) -- if YColumn is tensor then this must also be a tensor and given YIndex used - errColumn int - - // range constraints on Y values - yRange minmax.Range32 -} - -var _ plot.XYer = &tableXY{} -var _ plot.Valuer = &tableXY{} -var _ plot.Labeler = &tableXY{} -var _ plots.YErrorer = &tableXY{} - -// newTableXY returns a new XY plot view onto the given IndexView of table.Table (makes a copy), -// from given column indexes, and tensor indexes within each cell. -// Column indexes are enforced to be valid, with an error message if they are not. -func newTableXY(dt *table.IndexView, xcol, xtsrIndex, ycol, ytsrIndex int, yrng minmax.Range32) (*tableXY, error) { - txy := &tableXY{table: dt.Clone(), xColumn: xcol, yColumn: ycol, xIndex: xtsrIndex, yIndex: ytsrIndex, yRange: yrng} - return txy, txy.validate() -} - -// newTableXYName returns a new XY plot view onto the given IndexView of table.Table (makes a copy), -// from given column name and tensor indexes within each cell. -// Column indexes are enforced to be valid, with an error message if they are not. -func newTableXYName(dt *table.IndexView, xi, xtsrIndex int, ycol string, ytsrIndex int, yrng minmax.Range32) (*tableXY, error) { - yi, err := dt.Table.ColumnIndex(ycol) - if errors.Log(err) != nil { - return nil, err - } - txy := &tableXY{table: dt.Clone(), xColumn: xi, yColumn: yi, xIndex: xtsrIndex, yIndex: ytsrIndex, yRange: yrng} - return txy, txy.validate() -} - -// validate returns an error message if column indexes are invalid, else nil. -// It also resets invalid column indexes to 0 so nothing crashes.
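// validate is reached through the constructors above; a minimal sketch
// (editor's illustration; ixv, xi, and yi stand for assumed values):
//
//	xy, err := newTableXY(ixv, xi, 0, yi, 0, minmax.Range32{})
//	if err != nil {
//		// an out-of-range column index was reset to 0 and reported here
//	}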
-func (txy *tableXY) validate() error { - if txy.table == nil { - return errors.New("eplot.TableXY table is nil") - } - nc := txy.table.Table.NumColumns() - if txy.xColumn >= nc || txy.xColumn < 0 { - txy.xColumn = 0 - return errors.New("eplot.TableXY XColumn index invalid -- reset to 0") - } - if txy.yColumn >= nc || txy.yColumn < 0 { - txy.yColumn = 0 - return errors.New("eplot.TableXY YColumn index invalid -- reset to 0") - } - xc := txy.table.Table.Columns[txy.xColumn] - yc := txy.table.Table.Columns[txy.yColumn] - if xc.NumDims() > 1 { - _, txy.xRowSize = xc.RowCellSize() - // note: index already validated - } - if yc.NumDims() > 1 { - _, txy.yRowSize = yc.RowCellSize() - if txy.yIndex >= txy.yRowSize || txy.yIndex < 0 { - txy.yIndex = 0 - return errors.New("eplot.TableXY Y TensorIndex invalid -- reset to 0") - } - } - txy.filterValues() - return nil -} - -// filterValues removes items with NaN values, and out of Y range -func (txy *tableXY) filterValues() { - txy.table.Filter(func(et *table.Table, row int) bool { - xv := txy.tRowXValue(row) - yv := txy.tRowValue(row) - if math32.IsNaN(yv) || math32.IsNaN(xv) { - return false - } - if txy.yRange.FixMin && yv < txy.yRange.Min { - return false - } - if txy.yRange.FixMax && yv > txy.yRange.Max { - return false - } - return true - }) -} - -// Len returns the number of rows in the view of table -func (txy *tableXY) Len() int { - if txy.table == nil || txy.table.Table == nil { - return 0 - } - return txy.table.Len() -} - -// tRowValue returns the y value at given true table row in table -func (txy *tableXY) tRowValue(row int) float32 { - yc := txy.table.Table.Columns[txy.yColumn] - y := float32(0.0) - switch { - case yc.IsString(): - y = float32(row) - case yc.NumDims() > 1: - _, sz := yc.RowCellSize() - if txy.yIndex < sz && txy.yIndex >= 0 { - y = float32(yc.FloatRowCell(row, txy.yIndex)) - } - default: - y = float32(yc.Float1D(row)) - } - return y -} - -// Value returns the y value at given row in table -func (txy *tableXY) Value(row int) float32 { - if txy.table == nil || txy.table.Table == nil || row >= txy.table.Len() { - return 0 - } - trow := txy.table.Indexes[row] // true table row - yc := txy.table.Table.Columns[txy.yColumn] - y := float32(0.0) - switch { - case yc.IsString(): - y = float32(row) - case yc.NumDims() > 1: - _, sz := yc.RowCellSize() - if txy.yIndex < sz && txy.yIndex >= 0 { - y = float32(yc.FloatRowCell(trow, txy.yIndex)) - } - default: - y = float32(yc.Float1D(trow)) - } - return y -} - -// tRowXValue returns an x value at given actual row in table -func (txy *tableXY) tRowXValue(row int) float32 { - if txy.table == nil || txy.table.Table == nil { - return 0 - } - xc := txy.table.Table.Columns[txy.xColumn] - x := float32(0.0) - switch { - case xc.IsString(): - x = float32(row) - case xc.NumDims() > 1: - _, sz := xc.RowCellSize() - if txy.xIndex < sz && txy.xIndex >= 0 { - x = float32(xc.FloatRowCell(row, txy.xIndex)) - } - default: - x = float32(xc.Float1D(row)) - } - return x -} - -// xValue returns an x value at given row in table -func (txy *tableXY) xValue(row int) float32 { - if txy.table == nil || txy.table.Table == nil || row >= txy.table.Len() { - return 0 - } - trow := txy.table.Indexes[row] // true table row - xc := txy.table.Table.Columns[txy.xColumn] - x := float32(0.0) - switch { - case xc.IsString(): - x = float32(row) - case xc.NumDims() > 1: - _, sz := xc.RowCellSize() - if txy.xIndex < sz && txy.xIndex >= 0 { - x = float32(xc.FloatRowCell(trow, txy.xIndex)) - } - default: - x = 
float32(xc.Float1D(trow)) - } - return x -} - -// XY returns an x, y pair at given row in table -func (txy *tableXY) XY(row int) (x, y float32) { - if txy.table == nil || txy.table.Table == nil { - return 0, 0 - } - x = txy.xValue(row) - y = txy.Value(row) - return -} - -// Label returns a label for given row in table, implementing [plot.Labeler] interface -func (txy *tableXY) Label(row int) string { - if txy.table == nil || txy.table.Table == nil || row >= txy.table.Len() { - return "" - } - trow := txy.table.Indexes[row] // true table row - return txy.table.Table.Columns[txy.labelColumn].String1D(trow) -} - -// YError returns error bars, implementing [plots.YErrorer] interface. -func (txy *tableXY) YError(row int) (float32, float32) { - if txy.table == nil || txy.table.Table == nil || row >= txy.table.Len() { - return 0, 0 - } - trow := txy.table.Indexes[row] // true table row - ec := txy.table.Table.Columns[txy.errColumn] - eval := float32(0.0) - switch { - case ec.IsString(): - eval = float32(row) - case ec.NumDims() > 1: - _, sz := ec.RowCellSize() - if txy.yIndex < sz && txy.yIndex >= 0 { - eval = float32(ec.FloatRowCell(trow, txy.yIndex)) - } - default: - eval = float32(ec.Float1D(trow)) - } - return -eval, eval -} diff --git a/plot/plotcore/testdata/ra25epoch.tsv b/plot/plotcore/testdata/ra25epoch.tsv deleted file mode 100644 index c7ec83530b..0000000000 --- a/plot/plotcore/testdata/ra25epoch.tsv +++ /dev/null @@ -1,48 +0,0 @@ -|Run |Epoch $RunName #CorSim #UnitErr #PctErr #PctCor #TstCorSim #TstUnitErr #TstPctCor #TstPctErr #PerTrlMSec #Hidden1_ActMAvg #Hidden1_ActMMax #Hidden1_MaxGeM #Hidden1_CorDiff #Hidden1_GiMult #Hidden2_ActMAvg #Hidden2_ActMMax #Hidden2_MaxGeM #Hidden2_CorDiff #Hidden2_GiMult #Output_ActMAvg #Output_ActMMax #Output_MaxGeM #Output_CorDiff #Output_GiMult #Input_ActAvg #Hidden1_PCA_NStrong #Hidden1_PCA_Top5 #Hidden1_PCA_Next5 #Hidden1_PCA_Rest #Hidden2_PCA_NStrong #Hidden2_PCA_Top5 #Hidden2_PCA_Next5 #Hidden2_PCA_Rest #Output_PCA_NStrong #Output_PCA_Top5 #Output_PCA_Next5 #Output_PCA_Rest -0 0 Base 0.002623 0.2525 1 0 0 0 0 0 0 0.07241 0.6488 1.021 0.01213 1 0.07222 0.4663 0.9947 0.08514 1 0.2407 0.6594 0.9931 0.9974 1 0.1537 24 0.1876 0.09636 0.005853 24 0.1082 0.05629 0.004091 10 0.05466 0.01813 0.003332 -0 1 Base 0.305 0.2375 1 0 0 0 0 0 12.42 0.07315 0.646 1.043 0.009661 1 0.07154 0.4906 0.9892 0.06949 1 0.2354 0.6632 0.9878 0.695 1 0.1603 24 0.1876 0.09636 0.005853 24 0.1082 0.05629 0.004091 10 0.05466 0.01813 0.003332 -0 2 Base 0.4382 0.2288 1 0 0 0 0 0 11.02 0.07365 0.6484 1.061 0.01141 1 0.07133 0.5045 0.9881 0.06971 1 0.2351 0.5644 0.9815 0.5618 1 0.1664 24 0.1876 0.09636 0.005853 24 0.1082 0.05629 0.004091 10 0.05466 0.01813 0.003332 -0 3 Base 0.473 0.2275 1 0 0 0 0 0 11.11 0.07348 0.6489 1.078 0.01176 1 0.07126 0.493 0.9866 0.08268 1 0.2335 0.5932 0.9778 0.527 1 0.1718 24 0.1876 0.09636 0.005853 24 0.1082 0.05629 0.004091 10 0.05466 0.01813 0.003332 -0 4 Base 0.5577 0.22 1 0 0.5551 0.2088 0 1 13.14 0.0731 0.6558 1.11 0.01174 1 0.07236 0.4924 0.9821 0.07721 1 0.2391 0.7094 0.9724 0.4423 1 0.1811 24 0.1876 0.09636 0.005853 24 0.1082 0.05629 0.004091 10 0.05466 0.01813 0.003332 -0 5 Base 0.6384 0.1813 1 0 0.5551 0.2088 0 1 11.19 0.07303 0.6465 1.122 0.01148 1 0.07182 0.4917 0.9855 0.0753 1 0.2437 0.7634 0.9705 0.3616 1 0.1851 21 0.2123 0.1116 0.005676 21 0.1164 0.0685 0.003969 12 0.0756 0.02649 0.004503 -0 6 Base 0.6672 0.1763 0.9375 0.0625 0.5551 0.2088 0 1 11.2 0.07343 0.6434 1.134 0.009667 1 0.07291 0.4896 0.9809 0.06602 1 0.2426 0.7192 0.9678 0.3328 
1 0.1887 21 0.2123 0.1116 0.005676 21 0.1164 0.0685 0.003969 12 0.0756 0.02649 0.004503 -0 7 Base 0.6644 0.1788 1 0 0.5551 0.2088 0 1 11.14 0.07337 0.6417 1.145 0.01117 1 0.07198 0.4965 0.9799 0.07333 1 0.2482 0.7953 0.9706 0.3356 1 0.192 21 0.2123 0.1116 0.005676 21 0.1164 0.0685 0.003969 12 0.0756 0.02649 0.004503 -0 8 Base 0.7187 0.1388 0.875 0.125 0.5551 0.2088 0 1 11.1 0.07382 0.6503 1.156 0.01262 1 0.07264 0.4992 0.9787 0.08156 1 0.2615 0.8592 0.9712 0.2813 1 0.1949 21 0.2123 0.1116 0.005676 21 0.1164 0.0685 0.003969 12 0.0756 0.02649 0.004503 -0 9 Base 0.7732 0.1088 0.8125 0.1875 0.7553 0.135 0 1 13.01 0.07344 0.6376 1.175 0.008834 1 0.07177 0.5021 0.9802 0.05816 1 0.2536 0.8328 0.9813 0.2268 1 0.1999 21 0.2123 0.1116 0.005676 21 0.1164 0.0685 0.003969 12 0.0756 0.02649 0.004503 -0 10 Base 0.7861 0.1113 0.8125 0.1875 0.7553 0.135 0 1 11.3 0.07344 0.6511 1.183 0.009321 1 0.07226 0.5157 0.9819 0.05857 1 0.2561 0.8696 0.9849 0.2139 1 0.2021 24 0.2092 0.1191 0.006909 24 0.1245 0.06798 0.004171 15 0.1318 0.04342 0.006997 -0 11 Base 0.7785 0.1038 0.75 0.25 0.7553 0.135 0 1 11.02 0.07336 0.6428 1.19 0.01263 1 0.07282 0.4994 0.9843 0.0738 1 0.2584 0.8563 0.9899 0.2215 1 0.204 24 0.2092 0.1191 0.006909 24 0.1245 0.06798 0.004171 15 0.1318 0.04342 0.006997 -0 12 Base 0.8289 0.07625 0.7188 0.2812 0.7553 0.135 0 1 11.02 0.07397 0.6507 1.199 0.01002 1 0.0725 0.5207 0.9899 0.05569 1 0.261 0.9157 0.9903 0.1711 1 0.2057 24 0.2092 0.1191 0.006909 24 0.1245 0.06798 0.004171 15 0.1318 0.04342 0.006997 -0 13 Base 0.8539 0.06375 0.6875 0.3125 0.7553 0.135 0 1 11.07 0.0736 0.6463 1.204 0.009677 1 0.0729 0.5059 0.9907 0.05554 1 0.2537 0.9003 0.9919 0.1461 1 0.2073 24 0.2092 0.1191 0.006909 24 0.1245 0.06798 0.004171 15 0.1318 0.04342 0.006997 -0 14 Base 0.8322 0.0725 0.6875 0.3125 0.8393 0.0725 0.375 0.625 12.77 0.07361 0.6534 1.216 0.01075 1 0.07271 0.5212 0.9916 0.06336 1 0.2677 0.9293 0.999 0.1678 1 0.21 24 0.2092 0.1191 0.006909 24 0.1245 0.06798 0.004171 15 0.1318 0.04342 0.006997 -0 15 Base 0.8727 0.05125 0.5312 0.4688 0.8393 0.0725 0.375 0.625 10.97 0.07341 0.653 1.221 0.009811 1 0.07342 0.5275 0.9929 0.05527 1 0.2599 0.9216 1.003 0.1273 1 0.2112 21 0.2286 0.1285 0.005968 21 0.1289 0.08213 0.003995 16 0.1836 0.06461 0.01035 -0 16 Base 0.881 0.04125 0.5 0.5 0.8393 0.0725 0.375 0.625 10.81 0.07384 0.6527 1.227 0.009199 1 0.0731 0.527 0.9939 0.04453 1 0.2645 0.9465 1.008 0.119 1 0.2122 21 0.2286 0.1285 0.005968 21 0.1289 0.08213 0.003995 16 0.1836 0.06461 0.01035 -0 17 Base 0.8809 0.0425 0.5938 0.4062 0.8393 0.0725 0.375 0.625 11.12 0.07405 0.6622 1.233 0.008919 1 0.07259 0.5308 0.9988 0.04804 1 0.2633 0.9405 1.01 0.1191 1 0.2132 21 0.2286 0.1285 0.005968 21 0.1289 0.08213 0.003995 16 0.1836 0.06461 0.01035 -0 18 Base 0.8872 0.03625 0.4375 0.5625 0.8393 0.0725 0.375 0.625 10.77 0.07405 0.6662 1.238 0.009321 1 0.07355 0.5362 1.001 0.0502 1 0.2612 0.9393 1.012 0.1128 1 0.214 21 0.2286 0.1285 0.005968 21 0.1289 0.08213 0.003995 16 0.1836 0.06461 0.01035 -0 19 Base 0.8825 0.04 0.4375 0.5625 0.9115 0.0225 0.6562 0.3438 12.81 0.07401 0.6626 1.252 0.008725 1 0.07376 0.5265 1.007 0.04535 1 0.2641 0.9256 1.02 0.1175 1 0.2154 21 0.2286 0.1285 0.005968 21 0.1289 0.08213 0.003995 16 0.1836 0.06461 0.01035 -0 20 Base 0.9157 0.02625 0.3125 0.6875 0.9115 0.0225 0.6562 0.3438 10.96 0.07414 0.6556 1.257 0.008213 1 0.07337 0.5475 1.011 0.0385 1 0.2643 0.9627 1.028 0.08429 1 0.216 21 0.2531 0.133 0.006013 21 0.1505 0.07946 0.003921 16 0.231 0.07662 0.0119 -0 21 Base 0.9183 0.0175 0.2812 0.7188 0.9115 0.0225 0.6562 
0.3438 10.89 0.07442 0.6688 1.263 0.00851 1 0.07321 0.5387 1.013 0.03879 1 0.2606 0.9467 1.031 0.08168 1 0.2166 21 0.2531 0.133 0.006013 21 0.1505 0.07946 0.003921 16 0.231 0.07662 0.0119 -0 22 Base 0.928 0.01125 0.1875 0.8125 0.9115 0.0225 0.6562 0.3438 10.73 0.07388 0.6622 1.269 0.007702 1 0.07309 0.5295 1.015 0.03272 1 0.2651 0.9686 1.033 0.07197 1 0.2171 21 0.2531 0.133 0.006013 21 0.1505 0.07946 0.003921 16 0.231 0.07662 0.0119 -0 23 Base 0.932 0.0125 0.1875 0.8125 0.9115 0.0225 0.6562 0.3438 10.71 0.07482 0.6648 1.276 0.007855 1 0.07331 0.5547 1.021 0.03814 1 0.2643 0.9709 1.04 0.06799 1 0.2176 21 0.2531 0.133 0.006013 21 0.1505 0.07946 0.003921 16 0.231 0.07662 0.0119 -0 24 Base 0.909 0.0225 0.2188 0.7812 0.9203 0.01875 0.8125 0.1875 12.87 0.07458 0.6704 1.284 0.008471 1 0.0731 0.5418 1.027 0.03311 1 0.2679 0.9753 1.051 0.09104 1 0.2184 21 0.2531 0.133 0.006013 21 0.1505 0.07946 0.003921 16 0.231 0.07662 0.0119 -0 25 Base 0.9181 0.02 0.2188 0.7812 0.9203 0.01875 0.8125 0.1875 10.73 0.075 0.6827 1.289 0.009043 1 0.07381 0.5343 1.029 0.04152 1 0.2652 0.9715 1.054 0.08188 1 0.2187 24 0.2576 0.1187 0.007354 23 0.1501 0.07645 0.004662 17 0.2128 0.09799 0.01463 -0 26 Base 0.9325 0.01125 0.125 0.875 0.9203 0.01875 0.8125 0.1875 10.57 0.0744 0.6656 1.297 0.007468 1 0.07385 0.5538 1.031 0.02995 1 0.2587 0.9746 1.06 0.0675 1 0.219 24 0.2576 0.1187 0.007354 23 0.1501 0.07645 0.004662 17 0.2128 0.09799 0.01463 -0 27 Base 0.9352 0.005 0.125 0.875 0.9203 0.01875 0.8125 0.1875 10.79 0.07504 0.6666 1.303 0.007872 1 0.07343 0.5468 1.033 0.03298 1 0.2617 0.9805 1.067 0.06477 1 0.2194 24 0.2576 0.1187 0.007354 23 0.1501 0.07645 0.004662 17 0.2128 0.09799 0.01463 -0 28 Base 0.9344 0.00875 0.09375 0.9062 0.9203 0.01875 0.8125 0.1875 10.7 0.07497 0.6728 1.309 0.006918 1 0.07319 0.5465 1.037 0.02824 1 0.2624 0.9775 1.07 0.06555 1 0.2196 24 0.2576 0.1187 0.007354 23 0.1501 0.07645 0.004662 17 0.2128 0.09799 0.01463 -0 29 Base 0.9444 0.005 0.125 0.875 0.9404 0.00875 0.875 0.125 12.64 0.07495 0.6559 1.321 0.008182 1 0.07303 0.5548 1.049 0.03021 1 0.259 0.9734 1.081 0.05559 1 0.22 24 0.2576 0.1187 0.007354 23 0.1501 0.07645 0.004662 17 0.2128 0.09799 0.01463 -0 30 Base 0.9536 0.00625 0.125 0.875 0.9404 0.00875 0.875 0.125 10.65 0.07464 0.6754 1.326 0.006765 1 0.07384 0.5558 1.048 0.02781 1 0.2593 0.9811 1.085 0.04638 1 0.2202 20 0.2767 0.1455 0.006508 19 0.1686 0.09263 0.004174 16 0.2711 0.1159 0.01494 -0 31 Base 0.9525 0.00375 0.0625 0.9375 0.9404 0.00875 0.875 0.125 10.8 0.07543 0.679 1.334 0.008205 1 0.07345 0.5503 1.056 0.027 1 0.2554 0.9787 1.09 0.0475 1 0.2203 20 0.2767 0.1455 0.006508 19 0.1686 0.09263 0.004174 16 0.2711 0.1159 0.01494 -0 32 Base 0.9512 0.005 0.09375 0.9062 0.9404 0.00875 0.875 0.125 10.56 0.07593 0.6808 1.34 0.008235 1 0.07356 0.5628 1.059 0.03047 1 0.257 0.9755 1.093 0.04885 1 0.2205 20 0.2767 0.1455 0.006508 19 0.1686 0.09263 0.004174 16 0.2711 0.1159 0.01494 -0 33 Base 0.9525 0.005 0.09375 0.9062 0.9404 0.00875 0.875 0.125 10.65 0.07651 0.6866 1.344 0.007291 1 0.07295 0.5617 1.067 0.02448 1 0.2608 0.9898 1.094 0.04746 1 0.2207 20 0.2767 0.1455 0.006508 19 0.1686 0.09263 0.004174 16 0.2711 0.1159 0.01494 -0 34 Base 0.9511 0.005 0.09375 0.9062 0.9535 0.00125 0.9688 0.03125 12.6 0.07584 0.6793 1.353 0.006549 1 0.07333 0.57 1.072 0.02668 1 0.256 0.9793 1.103 0.04893 1 0.2209 20 0.2767 0.1455 0.006508 19 0.1686 0.09263 0.004174 16 0.2711 0.1159 0.01494 -0 35 Base 0.95 0.00875 0.1562 0.8438 0.9535 0.00125 0.9688 0.03125 10.72 0.07564 0.6955 1.358 0.007209 1 0.07355 0.5729 1.076 0.02475 1 
0.2607 0.9845 1.103 0.04996 1 0.221 24 0.2643 0.1444 0.007958 23 0.1695 0.09511 0.005056 18 0.2649 0.1195 0.01812 -0 36 Base 0.9478 0.00625 0.125 0.875 0.9535 0.00125 0.9688 0.03125 10.44 0.0758 0.6768 1.362 0.007061 1 0.07356 0.562 1.079 0.02375 1 0.258 0.9799 1.107 0.05223 1 0.2211 24 0.2643 0.1444 0.007958 23 0.1695 0.09511 0.005056 18 0.2649 0.1195 0.01812 -0 37 Base 0.9577 0.00375 0.0625 0.9375 0.9535 0.00125 0.9688 0.03125 10.5 0.07661 0.701 1.366 0.007002 1 0.07417 0.5805 1.088 0.02054 1 0.2537 0.9802 1.11 0.04235 1 0.2212 24 0.2643 0.1444 0.007958 23 0.1695 0.09511 0.005056 18 0.2649 0.1195 0.01812 -0 38 Base 0.9608 0.00125 0.03125 0.9688 0.9535 0.00125 0.9688 0.03125 10.6 0.07604 0.7044 1.371 0.006446 1 0.07395 0.5717 1.09 0.02204 1 0.2535 0.9792 1.111 0.03917 1 0.2213 24 0.2643 0.1444 0.007958 23 0.1695 0.09511 0.005056 18 0.2649 0.1195 0.01812 -0 39 Base 0.96 0.00375 0.09375 0.9062 0.959 0.0025 0.9375 0.0625 12.57 0.07629 0.6914 1.379 0.007166 1 0.07323 0.5777 1.097 0.02131 1 0.2561 0.9878 1.115 0.04005 1 0.2214 24 0.2643 0.1444 0.007958 23 0.1695 0.09511 0.005056 18 0.2649 0.1195 0.01812 -0 40 Base 0.9624 0.00125 0.03125 0.9688 0.959 0.0025 0.9375 0.0625 10.52 0.07664 0.7003 1.384 0.005926 1 0.07419 0.5797 1.099 0.01999 1 0.258 0.9917 1.118 0.03758 1 0.2214 20 0.3151 0.1524 0.006273 20 0.1988 0.09904 0.00402 17 0.2949 0.1143 0.01642 -0 41 Base 0.9605 0.0025 0.0625 0.9375 0.959 0.0025 0.9375 0.0625 10.53 0.07656 0.6925 1.389 0.006171 1 0.07358 0.5915 1.106 0.01794 1 0.252 0.9934 1.119 0.03954 1 0.2215 20 0.3151 0.1524 0.006273 20 0.1988 0.09904 0.00402 17 0.2949 0.1143 0.01642 -0 42 Base 0.9577 0.0025 0.0625 0.9375 0.959 0.0025 0.9375 0.0625 10.45 0.07676 0.7049 1.391 0.006068 1 0.07406 0.5734 1.112 0.02369 1 0.256 0.9839 1.124 0.04233 1 0.2215 20 0.3151 0.1524 0.006273 20 0.1988 0.09904 0.00402 17 0.2949 0.1143 0.01642 -0 43 Base 0.9703 0 0 1 0.959 0.0025 0.9375 0.0625 10.27 0.07618 0.7057 1.395 0.005172 1 0.07343 0.5834 1.114 0.01613 1 0.254 0.9963 1.127 0.02969 1 0.2216 20 0.3151 0.1524 0.006273 20 0.1988 0.09904 0.00402 17 0.2949 0.1143 0.01642 -0 44 Base 0.97 0.00125 0.03125 0.9688 0.9676 0.0025 0.9375 0.0625 12.32 0.07682 0.7062 1.404 0.005644 1 0.07353 0.5933 1.124 0.01662 1 0.2515 0.993 1.131 0.03004 1 0.2216 20 0.3151 0.1524 0.006273 20 0.1988 0.09904 0.00402 17 0.2949 0.1143 0.01642 -0 45 Base 0.9661 0 0 1 0.9676 0.0025 0.9375 0.0625 10.67 0.07726 0.7074 1.409 0.005708 1 0.07429 0.5932 1.126 0.01833 1 0.255 0.9913 1.131 0.03392 1 0.2217 23 0.308 0.1482 0.008264 23 0.2055 0.09122 0.00535 18 0.3067 0.1335 0.02283 -0 46 Base 0.9682 0 0 1 0.9676 0.0025 0.9375 0.0625 10.34 0.07727 0.705 1.412 0.005763 1 0.07392 0.591 1.128 0.01715 1 0.2516 0.9877 1.132 0.03176 1 0.2217 23 0.308 0.1482 0.008264 23 0.2055 0.09122 0.00535 18 0.3067 0.1335 0.02283 diff --git a/plot/plotcore/typegen.go b/plot/plotcore/typegen.go deleted file mode 100644 index 9a925c9750..0000000000 --- a/plot/plotcore/typegen.go +++ /dev/null @@ -1,40 +0,0 @@ -// Code generated by "core generate"; DO NOT EDIT. - -package plotcore - -import ( - "cogentcore.org/core/tree" - "cogentcore.org/core/types" -) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot/plotcore.PlotOptions", IDName: "plot-options", Doc: "PlotOptions are options for the overall plot.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "Title", Doc: "optional title at top of plot"}, {Name: "Type", Doc: "type of plot to generate. 
For a Bar plot, items are plotted ordinally by row and the XAxis is optional"}, {Name: "Lines", Doc: "whether to plot lines"}, {Name: "Points", Doc: "whether to plot points with symbols"}, {Name: "LineWidth", Doc: "width of lines"}, {Name: "PointSize", Doc: "size of points"}, {Name: "PointShape", Doc: "the shape used to draw points"}, {Name: "BarWidth", Doc: "width of bars for bar plot, as fraction of available space (1 = no gaps)"}, {Name: "NegativeXDraw", Doc: "if true, draw lines that connect points with a negative X-axis direction;\notherwise there is a break in the line.\ndefault is false, so that repeated series of data across the X axis\nare plotted separately."}, {Name: "Scale", Doc: "Scale multiplies the plot DPI value, to change the overall scale\nof the rendered plot. Larger numbers produce larger scaling.\nTypically use larger numbers when generating plots for inclusion in\ndocuments or other cases where the overall plot size will be small."}, {Name: "XAxis", Doc: "what column to use for the common X axis. if empty or not found,\nthe row number is used. This optional for Bar plots, if present and\nLegend is also present, then an extra space will be put between X values."}, {Name: "Legend", Doc: "optional column for adding a separate colored / styled line or bar\naccording to this value, and acts just like a separate Y variable,\ncrossed with Y variables."}, {Name: "LegendPosition", Doc: "position of the Legend"}, {Name: "XAxisRotation", Doc: "rotation of the X Axis labels, in degrees"}, {Name: "XAxisLabel", Doc: "optional label to use for XAxis instead of column name"}, {Name: "YAxisLabel", Doc: "optional label to use for YAxis -- if empty, first column name is used"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot/plotcore.ColumnOptions", IDName: "column-options", Doc: "ColumnOptions are options for plotting one column of data.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "On", Doc: "whether to plot this column"}, {Name: "Column", Doc: "name of column being plotting"}, {Name: "Lines", Doc: "whether to plot lines; uses the overall plot option if unset"}, {Name: "Points", Doc: "whether to plot points with symbols; uses the overall plot option if unset"}, {Name: "LineWidth", Doc: "the width of lines; uses the overall plot option if unset"}, {Name: "PointSize", Doc: "the size of points; uses the overall plot option if unset"}, {Name: "PointShape", Doc: "the shape used to draw points; uses the overall plot option if unset"}, {Name: "Range", Doc: "effective range of data to plot -- either end can be fixed"}, {Name: "FullRange", Doc: "full actual range of data -- only valid if specifically computed"}, {Name: "Color", Doc: "color to use when plotting the line / column"}, {Name: "NTicks", Doc: "desired number of ticks"}, {Name: "Label", Doc: "if specified, this is an alternative label to use when plotting"}, {Name: "TensorIndex", Doc: "if column has n-dimensional tensor cells in each row, this is the index within each cell to plot -- use -1 to plot *all* indexes as separate lines"}, {Name: "ErrColumn", Doc: "specifies a column containing error bars for this column"}, {Name: "IsString", Doc: "if true this is a string column -- plots as labels"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot/plotcore.Plot", IDName: "plot", Doc: "Plot is a widget that renders a [plot.Plot] object.\nIf it is not [states.ReadOnly], the user can pan and zoom the graph.\nSee [PlotEditor] for an interactive 
interface for selecting columns to view.", Embeds: []types.Field{{Name: "WidgetBase"}}, Fields: []types.Field{{Name: "Scale", Doc: "Scale multiplies the plot DPI value, to change the overall scale\nof the rendered plot. Larger numbers produce larger scaling.\nTypically use larger numbers when generating plots for inclusion in\ndocuments or other cases where the overall plot size will be small."}, {Name: "Plot", Doc: "Plot is the Plot to display in this widget"}}}) - -// NewPlot returns a new [Plot] with the given optional parent: -// Plot is a widget that renders a [plot.Plot] object. -// If it is not [states.ReadOnly], the user can pan and zoom the graph. -// See [PlotEditor] for an interactive interface for selecting columns to view. -func NewPlot(parent ...tree.Node) *Plot { return tree.New[Plot](parent...) } - -// SetScale sets the [Plot.Scale]: -// Scale multiplies the plot DPI value, to change the overall scale -// of the rendered plot. Larger numbers produce larger scaling. -// Typically use larger numbers when generating plots for inclusion in -// documents or other cases where the overall plot size will be small. -func (t *Plot) SetScale(v float32) *Plot { t.Scale = v; return t } - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot/plotcore.PlotEditor", IDName: "plot-editor", Doc: "PlotEditor is a widget that provides an interactive 2D plot\nof selected columns of tabular data, represented by a [table.IndexView] into\na [table.Table]. Other types of tabular data can be converted into this format.\nThe user can change various options for the plot and also modify the underlying data.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Methods: []types.Method{{Name: "SaveSVG", Doc: "SaveSVG saves the plot to an svg -- first updates to ensure that plot is current", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"fname"}}, {Name: "SavePNG", Doc: "SavePNG saves the current plot to a png, capturing current render", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"fname"}}, {Name: "SaveCSV", Doc: "SaveCSV saves the Table data to a csv (comma-separated values) file with headers (any delim)", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"fname", "delim"}}, {Name: "SaveAll", Doc: "SaveAll saves the current plot to a png, svg, and the data to a tsv -- full save\nAny extension is removed and appropriate extensions are added", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"fname"}}, {Name: "OpenCSV", Doc: "OpenCSV opens the Table data from a csv (comma-separated values) file (or any delim)", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"filename", "delim"}}, {Name: "setColumnsByName", Doc: "setColumnsByName turns columns on or off if their name contains\nthe given string.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"nameContains", "on"}}}, Embeds: []types.Field{{Name: "Frame"}}, Fields: []types.Field{{Name: "table", Doc: "table is the table of data being plotted."}, {Name: "Options", Doc: "Options are the overall plot options."}, {Name: "Columns", Doc: "Columns are the options for each column of the table."}, {Name: "plot", Doc: "plot is the plot object."}, {Name: "svgFile", Doc: "current svg file"}, {Name: "dataFile", Doc: "current csv data file"}, {Name: "inPlot", Doc: "currently doing a plot"}, {Name: "columnsFrame"}, {Name: "plotWidget"}}}) - -// 
NewPlotEditor returns a new [PlotEditor] with the given optional parent: -// PlotEditor is a widget that provides an interactive 2D plot -// of selected columns of tabular data, represented by a [table.IndexView] into -// a [table.Table]. Other types of tabular data can be converted into this format. -// The user can change various options for the plot and also modify the underlying data. -func NewPlotEditor(parent ...tree.Node) *PlotEditor { return tree.New[PlotEditor](parent...) } - -// SetOptions sets the [PlotEditor.Options]: -// Options are the overall plot options. -func (t *PlotEditor) SetOptions(v PlotOptions) *PlotEditor { t.Options = v; return t } diff --git a/plot/plotcore/xyplot.go b/plot/plotcore/xyplot.go deleted file mode 100644 index eb405ee337..0000000000 --- a/plot/plotcore/xyplot.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plotcore - -import ( - "fmt" - "log/slog" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/colors" - "cogentcore.org/core/plot" - "cogentcore.org/core/plot/plots" - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/stats/split" - "cogentcore.org/core/tensor/table" -) - -// genPlotXY generates an XY (lines, points) plot, setting Plot variable -func (pl *PlotEditor) genPlotXY() { - plt := plot.New() - - // process xaxis first - xi, xview, err := pl.plotXAxis(plt, pl.table) - if err != nil { - return - } - xp := pl.Columns[xi] - - var lsplit *table.Splits - nleg := 1 - if pl.Options.Legend != "" { - _, err = pl.table.Table.ColumnIndex(pl.Options.Legend) - if err != nil { - slog.Error("plot.Legend", "err", err.Error()) - } else { - errors.Log(xview.SortStableColumnNames([]string{pl.Options.Legend, xp.Column}, table.Ascending)) - lsplit = split.GroupBy(xview, pl.Options.Legend) - nleg = max(lsplit.Len(), 1) - } - } - - var firstXY *tableXY - var strCols []*ColumnOptions - nys := 0 - for _, cp := range pl.Columns { - if !cp.On { - continue - } - if cp.IsString { - strCols = append(strCols, cp) - continue - } - if cp.TensorIndex < 0 { - yc := errors.Log1(pl.table.Table.ColumnByName(cp.Column)) - _, sz := yc.RowCellSize() - nys += sz - } else { - nys++ - } - } - - if nys == 0 { - return - } - - firstXY = nil - yidx := 0 - for _, cp := range pl.Columns { - if !cp.On || cp == xp { - continue - } - if cp.IsString { - continue - } - for li := 0; li < nleg; li++ { - lview := xview - leg := "" - if lsplit != nil && len(lsplit.Values) > li { - leg = lsplit.Values[li][0] - lview = lsplit.Splits[li] - } - nidx := 1 - stidx := cp.TensorIndex - if cp.TensorIndex < 0 { // do all - yc := errors.Log1(pl.table.Table.ColumnByName(cp.Column)) - _, sz := yc.RowCellSize() - nidx = sz - stidx = 0 - } - for ii := 0; ii < nidx; ii++ { - idx := stidx + ii - tix := lview.Clone() - xy, _ := newTableXYName(tix, xi, xp.TensorIndex, cp.Column, idx, cp.Range) - if xy == nil { - continue - } - if firstXY == nil { - firstXY = xy - } - var pts *plots.Scatter - var lns *plots.Line - lbl := cp.getLabel() - clr := cp.Color - if leg != "" { - lbl = leg + " " + lbl - } - if nleg > 1 { - cidx := yidx*nleg + li - clr = colors.Uniform(colors.Spaced(cidx)) - } - if nidx > 1 { - clr = colors.Uniform(colors.Spaced(idx)) - lbl = fmt.Sprintf("%s_%02d", lbl, idx) - } - if cp.Lines.Or(pl.Options.Lines) && cp.Points.Or(pl.Options.Points) { - lns, pts, _ = plots.NewLinePoints(xy) - } else if cp.Points.Or(pl.Options.Points) { 
- pts, _ = plots.NewScatter(xy) - } else { - lns, _ = plots.NewLine(xy) - } - if lns != nil { - lns.LineStyle.Width.Pt(float32(cp.LineWidth.Or(pl.Options.LineWidth))) - lns.LineStyle.Color = clr - lns.NegativeXDraw = pl.Options.NegativeXDraw - plt.Add(lns) - if pts != nil { - plt.Legend.Add(lbl, lns, pts) - } else { - plt.Legend.Add(lbl, lns) - } - } - if pts != nil { - pts.LineStyle.Color = clr - pts.LineStyle.Width.Pt(float32(cp.LineWidth.Or(pl.Options.LineWidth))) - pts.PointSize.Pt(float32(cp.PointSize.Or(pl.Options.PointSize))) - pts.PointShape = cp.PointShape.Or(pl.Options.PointShape) - plt.Add(pts) - if lns == nil { - plt.Legend.Add(lbl, pts) - } - } - if cp.ErrColumn != "" { - ec := errors.Log1(pl.table.Table.ColumnIndex(cp.ErrColumn)) - if ec >= 0 { - xy.errColumn = ec - eb, _ := plots.NewYErrorBars(xy) - eb.LineStyle.Color = clr - plt.Add(eb) - } - } - } - } - yidx++ - } - if firstXY != nil && len(strCols) > 0 { - for _, cp := range strCols { - xy, _ := newTableXY(xview, xi, xp.TensorIndex, firstXY.yColumn, cp.TensorIndex, firstXY.yRange) - xy.labelColumn, _ = xview.Table.ColumnIndex(cp.Column) - xy.yIndex = firstXY.yIndex - lbls, _ := plots.NewLabels(xy) - if lbls != nil { - plt.Add(lbls) - } - } - } - - // Use string labels for X axis if X is a string - xc := pl.table.Table.Columns[xi] - if xc.IsString() { - xcs := xc.(*tensor.String) - vals := make([]string, pl.table.Len()) - for i, dx := range pl.table.Indexes { - vals[i] = xcs.Values[dx] - } - plt.NominalX(vals...) - } - - pl.configPlot(plt) - pl.plot = plt -} diff --git a/plot/plots/barchart.go b/plot/plots/barchart.go deleted file mode 100644 index 0b20ce7b00..0000000000 --- a/plot/plots/barchart.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright (c) 2019, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This is copied and modified directly from gonum to add better error-bar -// plotting for bar plots, along with multiple groups. - -// Copyright Ā©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plots - -import ( - "image" - - "cogentcore.org/core/colors" - "cogentcore.org/core/math32" - "cogentcore.org/core/plot" -) - -// A BarChart presents ordinally-organized data with rectangular bars -// with lengths proportional to the data values, and an optional -// error bar ("handle") at the top of the bar using given error value -// (single value, like a standard deviation etc, not drawn below the bar). -// -// Bars are plotted centered at integer multiples of Stride plus Start offset. -// Full data range also includes Pad value to extend range beyond edge bar centers. -// Bar Width is in data units, e.g., should be <= Stride. -// Defaults provide a unit-spaced plot. -type BarChart struct { - // Values are the plotted values - Values plot.Values - - // YErrors is a copy of the Y errors for each point. - Errors plot.Values - - // XYs is the actual pixel plotting coordinates for each value. - XYs plot.XYs - - // PXYs is the actual pixel plotting coordinates for each value. - PXYs plot.XYs - - // Offset is offset added to each X axis value relative to the - // Stride computed value (X = offset + index * Stride) - // Defaults to 1. - Offset float32 - - // Stride is distance between bars. Defaults to 1. - Stride float32 - - // Width is the width of the bars, which should be less than - // the Stride to prevent bar overlap. 
- // Defaults to .8 - Width float32 - - // Pad is additional space at start / end of data range, to keep bars from - // overflowing ends. This amount is subtracted from Offset - // and added to (len(Values)-1)*Stride -- no other accommodation for bar - // width is provided, so that should be built into this value as well. - // Defaults to 1. - Pad float32 - - // Color is the fill color of the bars. - Color image.Image - - // LineStyle is the style of the line connecting the points. - // Use zero width to disable lines. - LineStyle plot.LineStyle - - // Horizontal dictates whether the bars should be in the vertical - // (default) or horizontal direction. If Horizontal is true, all - // X locations and distances referred to here will actually be Y - // locations and distances. - Horizontal bool - - // stackedOn is the bar chart upon which this bar chart is stacked. - StackedOn *BarChart -} - -// NewBarChart returns a new bar chart with a single bar for each value. -// The bars heights correspond to the values and their x locations correspond -// to the index of their value in the Valuer. Optional error-bar values can be -// provided. -func NewBarChart(vs, ers plot.Valuer) (*BarChart, error) { - values, err := plot.CopyValues(vs) - if err != nil { - return nil, err - } - var errs plot.Values - if ers != nil { - errs, err = plot.CopyValues(ers) - if err != nil { - return nil, err - } - } - b := &BarChart{ - Values: values, - Errors: errs, - } - b.Defaults() - return b, nil -} - -func (b *BarChart) Defaults() { - b.Offset = 1 - b.Stride = 1 - b.Width = .8 - b.Pad = 1 - b.Color = colors.Scheme.OnSurface - b.LineStyle.Defaults() -} - -func (b *BarChart) XYData() (data plot.XYer, pixels plot.XYer) { - data = b.XYs - pixels = b.PXYs - return -} - -// BarHeight returns the maximum y value of the -// ith bar, taking into account any bars upon -// which it is stacked. -func (b *BarChart) BarHeight(i int) float32 { - ht := float32(0.0) - if b == nil { - return 0 - } - if i >= 0 && i < len(b.Values) { - ht += b.Values[i] - } - if b.StackedOn != nil { - ht += b.StackedOn.BarHeight(i) - } - return ht -} - -// StackOn stacks a bar chart on top of another, -// and sets the bar positioning options to that of the -// chart upon which it is being stacked. -func (b *BarChart) StackOn(on *BarChart) { - b.Offset = on.Offset - b.Stride = on.Stride - b.Pad = on.Pad - b.StackedOn = on -} - -// Plot implements the plot.Plotter interface. 
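// Before Plot is called by the plot package, a chart is typically constructed
// and added to a plot; a hedged sketch (editor's illustration: plt is an
// assumed *plot.Plot, the values are made up, and plot.Values is assumed to
// satisfy plot.Valuer):
//
//	base, _ := NewBarChart(plot.Values{1, 2, 3}, nil)
//	top, _ := NewBarChart(plot.Values{2, 1, 2}, nil)
//	top.StackOn(base) // share Offset, Stride, and Pad with base
//	plt.Add(base)
//	plt.Add(top)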
-func (b *BarChart) Plot(plt *plot.Plot) { - pc := plt.Paint - pc.FillStyle.Color = b.Color - b.LineStyle.SetStroke(plt) - - nv := len(b.Values) - b.XYs = make(plot.XYs, nv) - b.PXYs = make(plot.XYs, nv) - - hw := 0.5 * b.Width - ew := b.Width / 3 - for i, ht := range b.Values { - cat := b.Offset + float32(i)*b.Stride - var bottom, catVal, catMin, catMax, valMin, valMax float32 - var box math32.Box2 - if b.Horizontal { - catVal = plt.PY(cat) - catMin = plt.PY(cat - hw) - catMax = plt.PY(cat + hw) - bottom = b.StackedOn.BarHeight(i) // nil safe - valMin = plt.PX(bottom) - valMax = plt.PX(bottom + ht) - b.XYs[i] = math32.Vec2(bottom+ht, cat) - b.PXYs[i] = math32.Vec2(valMax, catVal) - box.Min.Set(valMin, catMin) - box.Max.Set(valMax, catMax) - } else { - catVal = plt.PX(cat) - catMin = plt.PX(cat - hw) - catMax = plt.PX(cat + hw) - bottom = b.StackedOn.BarHeight(i) // nil safe - valMin = plt.PY(bottom) - valMax = plt.PY(bottom + ht) - b.XYs[i] = math32.Vec2(cat, bottom+ht) - b.PXYs[i] = math32.Vec2(catVal, valMax) - box.Min.Set(catMin, valMin) - box.Max.Set(catMax, valMax) - } - - pc.DrawRectangle(box.Min.X, box.Min.Y, box.Size().X, box.Size().Y) - pc.FillStrokeClear() - - if i < len(b.Errors) { - errval := b.Errors[i] - if b.Horizontal { - eVal := plt.PX(bottom + ht + math32.Abs(errval)) - pc.MoveTo(valMax, catVal) - pc.LineTo(eVal, catVal) - pc.MoveTo(eVal, plt.PY(cat-ew)) - pc.LineTo(eVal, plt.PY(cat+ew)) - } else { - eVal := plt.PY(bottom + ht + math32.Abs(errval)) - pc.MoveTo(catVal, valMax) - pc.LineTo(catVal, eVal) - pc.MoveTo(plt.PX(cat-ew), eVal) - pc.LineTo(plt.PX(cat+ew), eVal) - } - pc.Stroke() - } - } -} - -// DataRange implements the plot.DataRanger interface. -func (b *BarChart) DataRange(plt *plot.Plot) (xmin, xmax, ymin, ymax float32) { - catMin := b.Offset - b.Pad - catMax := b.Offset + float32(len(b.Values)-1)*b.Stride + b.Pad - - valMin := math32.Inf(1) - valMax := math32.Inf(-1) - for i, val := range b.Values { - valBot := b.StackedOn.BarHeight(i) - valTop := valBot + val - if i < len(b.Errors) { - valTop += math32.Abs(b.Errors[i]) - } - valMin = math32.Min(valMin, math32.Min(valBot, valTop)) - valMax = math32.Max(valMax, math32.Max(valBot, valTop)) - } - if !b.Horizontal { - return catMin, catMax, valMin, valMax - } - return valMin, valMax, catMin, catMax -} - -// Thumbnail fulfills the plot.Thumbnailer interface. -func (b *BarChart) Thumbnail(plt *plot.Plot) { - pc := plt.Paint - pc.FillStyle.Color = b.Color - b.LineStyle.SetStroke(plt) - ptb := pc.Bounds - pc.DrawRectangle(float32(ptb.Min.X), float32(ptb.Min.Y), float32(ptb.Size().X), float32(ptb.Size().Y)) - pc.FillStrokeClear() -} diff --git a/plot/plots/doc.go b/plot/plots/doc.go deleted file mode 100644 index 072b3aef99..0000000000 --- a/plot/plots/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted from github.com/gonum/plot: -// Copyright Ā©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package plots defines a variety of standard Plotters for the -// plot package. -// -// Plotters use the primitives provided by the plot package to draw to -// the data area of a plot. This package provides some standard data -// styles such as lines, scatter plots, box plots, labels, and more. 
-// -// Unlike the gonum/plot package, NaN values are treated as missing -// data points, and are just skipped over. -// -// New* functions return an error if the data contains Inf or is -// empty. Some of the New* functions return other plotter-specific errors -// too. -package plots diff --git a/plot/plots/enumgen.go b/plot/plots/enumgen.go deleted file mode 100644 index b795361bc0..0000000000 --- a/plot/plots/enumgen.go +++ /dev/null @@ -1,87 +0,0 @@ -// Code generated by "core generate"; DO NOT EDIT. - -package plots - -import ( - "cogentcore.org/core/enums" -) - -var _StepKindValues = []StepKind{0, 1, 2, 3} - -// StepKindN is the highest valid value for type StepKind, plus one. -const StepKindN StepKind = 4 - -var _StepKindValueMap = map[string]StepKind{`NoStep`: 0, `PreStep`: 1, `MidStep`: 2, `PostStep`: 3} - -var _StepKindDescMap = map[StepKind]string{0: `NoStep connects two points by simple line`, 1: `PreStep connects two points by following lines: vertical, horizontal.`, 2: `MidStep connects two points by following lines: horizontal, vertical, horizontal. Vertical line is placed in the middle of the interval.`, 3: `PostStep connects two points by following lines: horizontal, vertical.`} - -var _StepKindMap = map[StepKind]string{0: `NoStep`, 1: `PreStep`, 2: `MidStep`, 3: `PostStep`} - -// String returns the string representation of this StepKind value. -func (i StepKind) String() string { return enums.String(i, _StepKindMap) } - -// SetString sets the StepKind value from its string representation, -// and returns an error if the string is invalid. -func (i *StepKind) SetString(s string) error { - return enums.SetString(i, s, _StepKindValueMap, "StepKind") -} - -// Int64 returns the StepKind value as an int64. -func (i StepKind) Int64() int64 { return int64(i) } - -// SetInt64 sets the StepKind value from an int64. -func (i *StepKind) SetInt64(in int64) { *i = StepKind(in) } - -// Desc returns the description of the StepKind value. -func (i StepKind) Desc() string { return enums.Desc(i, _StepKindDescMap) } - -// StepKindValues returns all possible values for the type StepKind. -func StepKindValues() []StepKind { return _StepKindValues } - -// Values returns all possible values for the type StepKind. -func (i StepKind) Values() []enums.Enum { return enums.Values(_StepKindValues) } - -// MarshalText implements the [encoding.TextMarshaler] interface. -func (i StepKind) MarshalText() ([]byte, error) { return []byte(i.String()), nil } - -// UnmarshalText implements the [encoding.TextUnmarshaler] interface. -func (i *StepKind) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "StepKind") } - -var _ShapesValues = []Shapes{0, 1, 2, 3, 4, 5, 6, 7} - -// ShapesN is the highest valid value for type Shapes, plus one. -const ShapesN Shapes = 8 - -var _ShapesValueMap = map[string]Shapes{`Ring`: 0, `Circle`: 1, `Square`: 2, `Box`: 3, `Triangle`: 4, `Pyramid`: 5, `Plus`: 6, `Cross`: 7} - -var _ShapesDescMap = map[Shapes]string{0: `Ring is the outline of a circle`, 1: `Circle is a solid circle`, 2: `Square is the outline of a square`, 3: `Box is a filled square`, 4: `Triangle is the outline of a triangle`, 5: `Pyramid is a filled triangle`, 6: `Plus is a plus sign`, 7: `Cross is a big X`} - -var _ShapesMap = map[Shapes]string{0: `Ring`, 1: `Circle`, 2: `Square`, 3: `Box`, 4: `Triangle`, 5: `Pyramid`, 6: `Plus`, 7: `Cross`} - -// String returns the string representation of this Shapes value. 
-func (i Shapes) String() string { return enums.String(i, _ShapesMap) } - -// SetString sets the Shapes value from its string representation, -// and returns an error if the string is invalid. -func (i *Shapes) SetString(s string) error { return enums.SetString(i, s, _ShapesValueMap, "Shapes") } - -// Int64 returns the Shapes value as an int64. -func (i Shapes) Int64() int64 { return int64(i) } - -// SetInt64 sets the Shapes value from an int64. -func (i *Shapes) SetInt64(in int64) { *i = Shapes(in) } - -// Desc returns the description of the Shapes value. -func (i Shapes) Desc() string { return enums.Desc(i, _ShapesDescMap) } - -// ShapesValues returns all possible values for the type Shapes. -func ShapesValues() []Shapes { return _ShapesValues } - -// Values returns all possible values for the type Shapes. -func (i Shapes) Values() []enums.Enum { return enums.Values(_ShapesValues) } - -// MarshalText implements the [encoding.TextMarshaler] interface. -func (i Shapes) MarshalText() ([]byte, error) { return []byte(i.String()), nil } - -// UnmarshalText implements the [encoding.TextUnmarshaler] interface. -func (i *Shapes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Shapes") } diff --git a/plot/plots/errbars.go b/plot/plots/errbars.go deleted file mode 100644 index 09e305e017..0000000000 --- a/plot/plots/errbars.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plots - -import ( - "cogentcore.org/core/math32" - "cogentcore.org/core/plot" - "cogentcore.org/core/styles/units" -) - -////////////////////////////////////////////////// -// XErrorer - -// XErrorer provides an interface for a list of Low, High error bar values. -// This is used in addition to an XYer interface, if implemented. -type XErrorer interface { - // XError returns Low, High error values for X data. - XError(i int) (low, high float32) -} - -// Errors is a slice of low and high error values. -type Errors []struct{ Low, High float32 } - -// XErrors implements the XErrorer interface. -type XErrors Errors - -func (xe XErrors) XError(i int) (low, high float32) { - return xe[i].Low, xe[i].High -} - -// YErrorer provides an interface for YError method. -// This is used in addition to an XYer interface, if implemented. -type YErrorer interface { - // YError returns two error values for Y data. - YError(i int) (float32, float32) -} - -// YErrors implements the YErrorer interface. -type YErrors Errors - -func (ye YErrors) YError(i int) (float32, float32) { - return ye[i].Low, ye[i].High -} - -// YErrorBars implements the plot.Plotter, plot.DataRanger, -// and plot.GlyphBoxer interfaces, drawing vertical error -// bars, denoting error in Y values. -type YErrorBars struct { - // XYs is a copy of the points for this line. - plot.XYs - - // YErrors is a copy of the Y errors for each point. - YErrors - - // PXYs is the actual pixel plotting coordinates for each XY value, - // representing the high, center value of the error bar. - PXYs plot.XYs - - // LineStyle is the style used to draw the error bars. - LineStyle plot.LineStyle - - // CapWidth is the width of the caps drawn at the top of each error bar. - CapWidth units.Value -} - -func (eb *YErrorBars) Defaults() { - eb.LineStyle.Defaults() - eb.CapWidth.Dp(10) -} - -// NewYErrorBars returns a new YErrorBars plotter, or an error on failure. 
-// The error values from the YErrorer interface are interpreted as relative -// to the corresponding Y value. The errors for a given Y value are computed -// by taking the absolute value of the error returned by the YErrorer -// and subtracting the first and adding the second to the Y value. -func NewYErrorBars(yerrs interface { - plot.XYer - YErrorer -}) (*YErrorBars, error) { - - errors := make(YErrors, yerrs.Len()) - for i := range errors { - errors[i].Low, errors[i].High = yerrs.YError(i) - if err := plot.CheckFloats(errors[i].Low, errors[i].High); err != nil { - return nil, err - } - } - xys, err := plot.CopyXYs(yerrs) - if err != nil { - return nil, err - } - - eb := &YErrorBars{ - XYs: xys, - YErrors: errors, - } - eb.Defaults() - return eb, nil -} - -func (e *YErrorBars) XYData() (data plot.XYer, pixels plot.XYer) { - data = e.XYs - pixels = e.PXYs - return -} - -// Plot implements the Plotter interface, drawing labels. -func (e *YErrorBars) Plot(plt *plot.Plot) { - pc := plt.Paint - uc := &pc.UnitContext - - e.CapWidth.ToDots(uc) - cw := 0.5 * e.CapWidth.Dots - nv := len(e.YErrors) - e.PXYs = make(plot.XYs, nv) - e.LineStyle.SetStroke(plt) - for i, err := range e.YErrors { - x := plt.PX(e.XYs[i].X) - ylow := plt.PY(e.XYs[i].Y - math32.Abs(err.Low)) - yhigh := plt.PY(e.XYs[i].Y + math32.Abs(err.High)) - - e.PXYs[i].X = x - e.PXYs[i].Y = yhigh - - pc.MoveTo(x, ylow) - pc.LineTo(x, yhigh) - - pc.MoveTo(x-cw, ylow) - pc.LineTo(x+cw, ylow) - - pc.MoveTo(x-cw, yhigh) - pc.LineTo(x+cw, yhigh) - pc.Stroke() - } -} - -// DataRange implements the plot.DataRanger interface. -func (e *YErrorBars) DataRange(plt *plot.Plot) (xmin, xmax, ymin, ymax float32) { - xmin, xmax = plot.Range(plot.XValues{e}) - ymin = math32.Inf(1) - ymax = math32.Inf(-1) - for i, err := range e.YErrors { - y := e.XYs[i].Y - ylow := y - math32.Abs(err.Low) - yhigh := y + math32.Abs(err.High) - ymin = math32.Min(math32.Min(math32.Min(ymin, y), ylow), yhigh) - ymax = math32.Max(math32.Max(math32.Max(ymax, y), ylow), yhigh) - } - return -} - -// XErrorBars implements the plot.Plotter, plot.DataRanger, -// and plot.GlyphBoxer interfaces, drawing horizontal error -// bars, denoting error in Y values. -type XErrorBars struct { - // XYs is a copy of the points for this line. - plot.XYs - - // XErrors is a copy of the X errors for each point. - XErrors - - // PXYs is the actual pixel plotting coordinates for each XY value, - // representing the high, center value of the error bar. - PXYs plot.XYs - - // LineStyle is the style used to draw the error bars. - LineStyle plot.LineStyle - - // CapWidth is the width of the caps drawn at the top - // of each error bar. - CapWidth units.Value -} - -// Returns a new XErrorBars plotter, or an error on failure. The error values -// from the XErrorer interface are interpreted as relative to the corresponding -// X value. The errors for a given X value are computed by taking the absolute -// value of the error returned by the XErrorer and subtracting the first and -// adding the second to the X value. 
-func NewXErrorBars(xerrs interface { - plot.XYer - XErrorer -}) (*XErrorBars, error) { - - errors := make(XErrors, xerrs.Len()) - for i := range errors { - errors[i].Low, errors[i].High = xerrs.XError(i) - if err := plot.CheckFloats(errors[i].Low, errors[i].High); err != nil { - return nil, err - } - } - xys, err := plot.CopyXYs(xerrs) - if err != nil { - return nil, err - } - - eb := &XErrorBars{ - XYs: xys, - XErrors: errors, - } - eb.Defaults() - return eb, nil -} - -func (eb *XErrorBars) Defaults() { - eb.LineStyle.Defaults() - eb.CapWidth.Dp(10) -} - -func (e *XErrorBars) XYData() (data plot.XYer, pixels plot.XYer) { - data = e.XYs - pixels = e.PXYs - return -} - -// Plot implements the Plotter interface, drawing labels. -func (e *XErrorBars) Plot(plt *plot.Plot) { - pc := plt.Paint - uc := &pc.UnitContext - - e.CapWidth.ToDots(uc) - cw := 0.5 * e.CapWidth.Dots - - nv := len(e.XErrors) - e.PXYs = make(plot.XYs, nv) - e.LineStyle.SetStroke(plt) - for i, err := range e.XErrors { - y := plt.PY(e.XYs[i].Y) - xlow := plt.PX(e.XYs[i].X - math32.Abs(err.Low)) - xhigh := plt.PX(e.XYs[i].X + math32.Abs(err.High)) - - e.PXYs[i].X = xhigh - e.PXYs[i].Y = y - - pc.MoveTo(xlow, y) - pc.LineTo(xhigh, y) - - pc.MoveTo(xlow, y-cw) - pc.LineTo(xlow, y+cw) - - pc.MoveTo(xhigh, y-cw) - pc.LineTo(xhigh, y+cw) - pc.Stroke() - } -} - -// DataRange implements the plot.DataRanger interface. -func (e *XErrorBars) DataRange(plt *plot.Plot) (xmin, xmax, ymin, ymax float32) { - ymin, ymax = plot.Range(plot.YValues{e}) - xmin = math32.Inf(1) - xmax = math32.Inf(-1) - for i, err := range e.XErrors { - x := e.XYs[i].X - xlow := x - math32.Abs(err.Low) - xhigh := x + math32.Abs(err.High) - xmin = math32.Min(math32.Min(math32.Min(xmin, x), xlow), xhigh) - xmax = math32.Max(math32.Max(math32.Max(xmax, x), xlow), xhigh) - } - return -} diff --git a/plot/plots/labels.go b/plot/plots/labels.go deleted file mode 100644 index be431c2665..0000000000 --- a/plot/plots/labels.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plots - -import ( - "errors" - "image" - - "cogentcore.org/core/math32" - "cogentcore.org/core/plot" - "cogentcore.org/core/styles/units" -) - -// Labels implements the Plotter interface, -// drawing a set of labels at specified points. -type Labels struct { - // XYs is a copy of the points for labels - plot.XYs - - // PXYs is the actual pixel plotting coordinates for each XY value. - PXYs plot.XYs - - // Labels is the set of labels corresponding to each point. - Labels []string - - // TextStyle is the style of the label text. - // Each label can have a different text style, but - // by default they share a common one (len = 1) - TextStyle []plot.TextStyle - - // Offset is added directly to the final label location. 
- Offset units.XY - - // plot size and number of TextStyle when styles last generated -- don't regen - styleSize image.Point - styleN int -} - -// NewLabels returns a new Labels using defaults -func NewLabels(d XYLabeler) (*Labels, error) { - xys, err := plot.CopyXYs(d) - if err != nil { - return nil, err - } - - if d.Len() != len(xys) { - return nil, errors.New("plotter: number of points does not match the number of labels") - } - - strs := make([]string, d.Len()) - for i := range strs { - strs[i] = d.Label(i) - } - - styles := make([]plot.TextStyle, 1) - for i := range styles { - styles[i].Defaults() - } - - return &Labels{ - XYs: xys, - Labels: strs, - TextStyle: styles, - }, nil -} - -func (l *Labels) XYData() (data plot.XYer, pixels plot.XYer) { - data = l.XYs - pixels = l.PXYs - return -} - -// updateStyles updates the text styles and dots. -// returns true if custom styles are used per point -func (l *Labels) updateStyles(plt *plot.Plot) bool { - customStyles := len(l.TextStyle) == len(l.XYs) - if plt.Size == l.styleSize && len(l.TextStyle) == l.styleN { - return customStyles - } - l.styleSize = plt.Size - l.styleN = len(l.TextStyle) - pc := plt.Paint - uc := &pc.UnitContext - l.Offset.ToDots(uc) - for i := range l.TextStyle { - l.TextStyle[i].ToDots(uc) - } - return customStyles -} - -// Plot implements the Plotter interface, drawing labels. -func (l *Labels) Plot(plt *plot.Plot) { - ps := plot.PlotXYs(plt, l.XYs) - customStyles := l.updateStyles(plt) - var ltxt plot.Text - for i, label := range l.Labels { - if label == "" { - continue - } - if customStyles { - ltxt.Style = l.TextStyle[i] - } else { - ltxt.Style = l.TextStyle[0] - } - ltxt.Text = label - ltxt.Config(plt) - tht := ltxt.PaintText.BBox.Size().Y - ltxt.Draw(plt, math32.Vec2(ps[i].X+l.Offset.X.Dots, ps[i].Y+l.Offset.Y.Dots-tht)) - } -} - -// DataRange returns the minimum and maximum X and Y values -func (l *Labels) DataRange(plt *plot.Plot) (xmin, xmax, ymin, ymax float32) { - xmin, xmax, ymin, ymax = plot.XYRange(l) // first get basic numerical range - pxToData := math32.FromPoint(plt.Size) - pxToData.X = (xmax - xmin) / pxToData.X - pxToData.Y = (ymax - ymin) / pxToData.Y - customStyles := l.updateStyles(plt) - var ltxt plot.Text - for i, label := range l.Labels { - if label == "" { - continue - } - if customStyles { - ltxt.Style = l.TextStyle[i] - } else { - ltxt.Style = l.TextStyle[0] - } - ltxt.Text = label - ltxt.Config(plt) - tht := pxToData.Y * ltxt.PaintText.BBox.Size().Y - twd := 1.1 * pxToData.X * ltxt.PaintText.BBox.Size().X - x, y := l.XY(i) - minx := x - maxx := x + pxToData.X*l.Offset.X.Dots + twd - miny := y - maxy := y + pxToData.Y*l.Offset.Y.Dots + tht // y is up here - xmin = min(xmin, minx) - xmax = max(xmax, maxx) - ymin = min(ymin, miny) - ymax = max(ymax, maxy) - } - return -} - -// XYLabeler combines the [plot.XYer] and [plot.Labeler] types. -type XYLabeler interface { - plot.XYer - plot.Labeler -} - -// XYLabels holds XY data with labels. -// The ith label corresponds to the ith XY. -type XYLabels struct { - plot.XYs - Labels []string -} - -// Label returns the label for point index i. -func (l XYLabels) Label(i int) string { - return l.Labels[i] -} - -var _ XYLabeler = (*XYLabels)(nil) diff --git a/plot/plots/line.go b/plot/plots/line.go deleted file mode 100644 index f1697e3691..0000000000 --- a/plot/plots/line.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted from github.com/gonum/plot: -// Copyright Ā©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plots - -//go:generate core generate - -import ( - "image" - - "cogentcore.org/core/math32" - "cogentcore.org/core/plot" -) - -// StepKind specifies a form of a connection of two consecutive points. -type StepKind int32 //enums:enum - -const ( - // NoStep connects two points by simple line - NoStep StepKind = iota - - // PreStep connects two points by following lines: vertical, horizontal. - PreStep - - // MidStep connects two points by following lines: horizontal, vertical, horizontal. - // Vertical line is placed in the middle of the interval. - MidStep - - // PostStep connects two points by following lines: horizontal, vertical. - PostStep -) - -// Line implements the Plotter interface, drawing a line using XYer data. -type Line struct { - // XYs is a copy of the points for this line. - plot.XYs - - // PXYs is the actual pixel plotting coordinates for each XY value. - PXYs plot.XYs - - // StepStyle is the kind of the step line. - StepStyle StepKind - - // LineStyle is the style of the line connecting the points. - // Use zero width to disable lines. - LineStyle plot.LineStyle - - // Fill is the color to fill the area below the plot. - // Use nil to disable filling, which is the default. - Fill image.Image - - // if true, draw lines that connect points with a negative X-axis direction; - // otherwise there is a break in the line. - // default is false, so that repeated series of data across the X axis - // are plotted separately. - NegativeXDraw bool -} - -// NewLine returns a Line that uses the default line style and -// does not draw glyphs. -func NewLine(xys plot.XYer) (*Line, error) { - data, err := plot.CopyXYs(xys) - if err != nil { - return nil, err - } - ln := &Line{XYs: data} - ln.Defaults() - return ln, nil -} - -// NewLinePoints returns both a Line and a -// Scatter plot for the given point data. -func NewLinePoints(xys plot.XYer) (*Line, *Scatter, error) { - sc, err := NewScatter(xys) - if err != nil { - return nil, nil, err - } - ln := &Line{XYs: sc.XYs} - ln.Defaults() - return ln, sc, nil -} - -func (pts *Line) Defaults() { - pts.LineStyle.Defaults() -} - -func (pts *Line) XYData() (data plot.XYer, pixels plot.XYer) { - data = pts.XYs - pixels = pts.PXYs - return -} - -// Plot draws the Line, implementing the plot.Plotter interface. 
-func (pts *Line) Plot(plt *plot.Plot) { - pc := plt.Paint - - ps := plot.PlotXYs(plt, pts.XYs) - np := len(ps) - pts.PXYs = ps - - if pts.Fill != nil { - pc.FillStyle.Color = pts.Fill - minY := plt.PY(plt.Y.Min) - prev := math32.Vec2(ps[0].X, minY) - pc.MoveTo(prev.X, prev.Y) - for i := range ps { - pt := ps[i] - switch pts.StepStyle { - case NoStep: - if pt.X < prev.X { - pc.LineTo(prev.X, minY) - pc.ClosePath() - pc.MoveTo(pt.X, minY) - } - pc.LineTo(pt.X, pt.Y) - case PreStep: - if i == 0 { - continue - } - if pt.X < prev.X { - pc.LineTo(prev.X, minY) - pc.ClosePath() - pc.MoveTo(pt.X, minY) - } else { - pc.LineTo(prev.X, pt.Y) - } - pc.LineTo(pt.X, pt.Y) - case MidStep: - if pt.X < prev.X { - pc.LineTo(prev.X, minY) - pc.ClosePath() - pc.MoveTo(pt.X, minY) - } else { - pc.LineTo(0.5*(prev.X+pt.X), prev.Y) - pc.LineTo(0.5*(prev.X+pt.X), pt.Y) - } - pc.LineTo(pt.X, pt.Y) - case PostStep: - if pt.X < prev.X { - pc.LineTo(prev.X, minY) - pc.ClosePath() - pc.MoveTo(pt.X, minY) - } else { - pc.LineTo(pt.X, prev.Y) - } - pc.LineTo(pt.X, pt.Y) - } - prev = pt - } - pc.LineTo(prev.X, minY) - pc.ClosePath() - pc.Fill() - } - pc.FillStyle.Color = nil - - if !pts.LineStyle.SetStroke(plt) { - return - } - prev := ps[0] - pc.MoveTo(prev.X, prev.Y) - for i := 1; i < np; i++ { - pt := ps[i] - if pts.StepStyle != NoStep { - if pt.X >= prev.X { - switch pts.StepStyle { - case PreStep: - pc.LineTo(prev.X, pt.Y) - case MidStep: - pc.LineTo(0.5*(prev.X+pt.X), prev.Y) - pc.LineTo(0.5*(prev.X+pt.X), pt.Y) - case PostStep: - pc.LineTo(pt.X, prev.Y) - } - } else { - pc.MoveTo(pt.X, pt.Y) - } - } - if !pts.NegativeXDraw && pt.X < prev.X { - pc.MoveTo(pt.X, pt.Y) - } else { - pc.LineTo(pt.X, pt.Y) - } - prev = pt - } - pc.Stroke() -} - -// DataRange returns the minimum and maximum -// x and y values, implementing the plot.DataRanger interface. -func (pts *Line) DataRange(plt *plot.Plot) (xmin, xmax, ymin, ymax float32) { - return plot.XYRange(pts) -} - -// Thumbnail returns the thumbnail for the LineTo, implementing the plot.Thumbnailer interface. -func (pts *Line) Thumbnail(plt *plot.Plot) { - pc := plt.Paint - ptb := pc.Bounds - midY := 0.5 * float32(ptb.Min.Y+ptb.Max.Y) - - if pts.Fill != nil { - tb := ptb - if pts.LineStyle.Width.Value > 0 { - tb.Min.Y = int(midY) - } - pc.FillBox(math32.FromPoint(tb.Min), math32.FromPoint(tb.Size()), pts.Fill) - } - - if pts.LineStyle.SetStroke(plt) { - pc.MoveTo(float32(ptb.Min.X), midY) - pc.LineTo(float32(ptb.Max.X), midY) - pc.Stroke() - } -} diff --git a/plot/plots/plot_test.go b/plot/plots/plot_test.go deleted file mode 100644 index c85f75020d..0000000000 --- a/plot/plots/plot_test.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plots - -import ( - "fmt" - "image" - "os" - "testing" - - "cogentcore.org/core/base/iox/imagex" - "cogentcore.org/core/colors" - "cogentcore.org/core/math32" - "cogentcore.org/core/paint" - "cogentcore.org/core/plot" -) - -func TestMain(m *testing.M) { - paint.FontLibrary.InitFontPaths(paint.FontPaths...) 
- os.Exit(m.Run()) -} - -func TestLine(t *testing.T) { - pt := plot.New() - pt.Title.Text = "Test Line" - pt.X.Min = 0 - pt.X.Max = 100 - pt.X.Label.Text = "X Axis" - pt.Y.Min = 0 - pt.Y.Max = 100 - pt.Y.Label.Text = "Y Axis" - - // note: making two overlapping series - data := make(plot.XYs, 42) - for i := range data { - x := float32(i % 21) - data[i].X = x * 5 - if i < 21 { - data[i].Y = float32(50) + 40*math32.Sin((x/8)*math32.Pi) - } else { - data[i].Y = float32(50) + 40*math32.Cos((x/8)*math32.Pi) - } - } - - l1, err := NewLine(data) - if err != nil { - t.Error(err.Error()) - } - pt.Add(l1) - pt.Legend.Add("Sine", l1) - pt.Legend.Add("Cos", l1) - - pt.Resize(image.Point{640, 480}) - pt.Draw() - imagex.Assert(t, pt.Pixels, "line.png") - - l1.Fill = colors.Uniform(colors.Yellow) - pt.Draw() - imagex.Assert(t, pt.Pixels, "line-fill.png") - - l1.StepStyle = PreStep - pt.Draw() - imagex.Assert(t, pt.Pixels, "line-prestep.png") - - l1.StepStyle = MidStep - pt.Draw() - imagex.Assert(t, pt.Pixels, "line-midstep.png") - - l1.StepStyle = PostStep - pt.Draw() - imagex.Assert(t, pt.Pixels, "line-poststep.png") - - l1.StepStyle = NoStep - l1.Fill = nil - l1.NegativeXDraw = true - pt.Draw() - imagex.Assert(t, pt.Pixels, "line-negx.png") - -} - -func TestScatter(t *testing.T) { - pt := plot.New() - pt.Title.Text = "Test Scatter" - pt.X.Min = 0 - pt.X.Max = 100 - pt.X.Label.Text = "X Axis" - pt.Y.Min = 0 - pt.Y.Max = 100 - pt.Y.Label.Text = "Y Axis" - - data := make(plot.XYs, 21) - for i := range data { - data[i].X = float32(i * 5) - data[i].Y = float32(50) + 40*math32.Sin((float32(i)/8)*math32.Pi) - } - - l1, err := NewScatter(data) - if err != nil { - t.Error(err.Error()) - } - pt.Add(l1) - - pt.Resize(image.Point{640, 480}) - - shs := ShapesValues() - for _, sh := range shs { - l1.PointShape = sh - pt.Draw() - imagex.Assert(t, pt.Pixels, "scatter-"+sh.String()+".png") - } -} - -func TestLabels(t *testing.T) { - pt := plot.New() - pt.Title.Text = "Test Labels" - pt.X.Label.Text = "X Axis" - pt.Y.Label.Text = "Y Axis" - - // note: making two overlapping series - data := make(plot.XYs, 12) - labels := make([]string, 12) - for i := range data { - x := float32(i % 21) - data[i].X = x * 5 - data[i].Y = float32(50) + 40*math32.Sin((x/8)*math32.Pi) - labels[i] = fmt.Sprintf("%7.4g", data[i].Y) - } - - l1, sc, err := NewLinePoints(data) - if err != nil { - t.Error(err.Error()) - } - pt.Add(l1) - pt.Add(sc) - pt.Legend.Add("Sine", l1, sc) - - l2, err := NewLabels(XYLabels{XYs: data, Labels: labels}) - if err != nil { - t.Error(err.Error()) - } - l2.Offset.X.Dp(6) - l2.Offset.Y.Dp(-6) - pt.Add(l2) - - pt.Resize(image.Point{640, 480}) - pt.Draw() - imagex.Assert(t, pt.Pixels, "labels.png") -} - -func TestBarChart(t *testing.T) { - pt := plot.New() - pt.Title.Text = "Test Bar Chart" - pt.X.Label.Text = "X Axis" - pt.Y.Min = 0 - pt.Y.Max = 100 - pt.Y.Label.Text = "Y Axis" - - data := make(plot.Values, 21) - for i := range data { - x := float32(i % 21) - data[i] = float32(50) + 40*math32.Sin((x/8)*math32.Pi) - } - - cos := make(plot.Values, 21) - for i := range data { - x := float32(i % 21) - cos[i] = float32(50) + 40*math32.Cos((x/8)*math32.Pi) - } - - l1, err := NewBarChart(data, nil) - if err != nil { - t.Error(err.Error()) - } - l1.Color = colors.Uniform(colors.Red) - pt.Add(l1) - pt.Legend.Add("Sine", l1) - - pt.Resize(image.Point{640, 480}) - pt.Draw() - imagex.Assert(t, pt.Pixels, "bar.png") - - l2, err := NewBarChart(cos, nil) - if err != nil { - t.Error(err.Error()) - } - l2.Color = 
colors.Uniform(colors.Blue) - pt.Legend.Add("Cosine", l2) - - l1.Stride = 2 - l2.Stride = 2 - l2.Offset = 2 - - pt.Add(l2) // note: range updated when added! - pt.Draw() - imagex.Assert(t, pt.Pixels, "bar-cos.png") -} - -func TestBarChartErr(t *testing.T) { - pt := plot.New() - pt.Title.Text = "Test Bar Chart Errors" - pt.X.Label.Text = "X Axis" - pt.Y.Min = 0 - pt.Y.Max = 100 - pt.Y.Label.Text = "Y Axis" - - data := make(plot.Values, 21) - for i := range data { - x := float32(i % 21) - data[i] = float32(50) + 40*math32.Sin((x/8)*math32.Pi) - } - - cos := make(plot.Values, 21) - for i := range data { - x := float32(i % 21) - cos[i] = float32(5) + 4*math32.Cos((x/8)*math32.Pi) - } - - l1, err := NewBarChart(data, cos) - if err != nil { - t.Error(err.Error()) - } - l1.Color = colors.Uniform(colors.Red) - pt.Add(l1) - pt.Legend.Add("Sine", l1) - - pt.Resize(image.Point{640, 480}) - pt.Draw() - imagex.Assert(t, pt.Pixels, "bar-err.png") - - l1.Horizontal = true - pt.UpdateRange() - pt.X.Min = 0 - pt.X.Max = 100 - pt.Draw() - imagex.Assert(t, pt.Pixels, "bar-err-horiz.png") -} - -func TestBarChartStack(t *testing.T) { - pt := plot.New() - pt.Title.Text = "Test Bar Chart Stacked" - pt.X.Label.Text = "X Axis" - pt.Y.Min = 0 - pt.Y.Max = 100 - pt.Y.Label.Text = "Y Axis" - - data := make(plot.Values, 21) - for i := range data { - x := float32(i % 21) - data[i] = float32(50) + 40*math32.Sin((x/8)*math32.Pi) - } - - cos := make(plot.Values, 21) - for i := range data { - x := float32(i % 21) - cos[i] = float32(5) + 4*math32.Cos((x/8)*math32.Pi) - } - - l1, err := NewBarChart(data, nil) - if err != nil { - t.Error(err.Error()) - } - l1.Color = colors.Uniform(colors.Red) - pt.Add(l1) - pt.Legend.Add("Sine", l1) - - l2, err := NewBarChart(cos, nil) - if err != nil { - t.Error(err.Error()) - } - l2.Color = colors.Uniform(colors.Blue) - l2.StackedOn = l1 - pt.Add(l2) - pt.Legend.Add("Cos", l2) - - pt.Resize(image.Point{640, 480}) - pt.Draw() - imagex.Assert(t, pt.Pixels, "bar-stacked.png") -} - -type XYErr struct { - plot.XYs - YErrors -} - -func TestErrBar(t *testing.T) { - pt := plot.New() - pt.Title.Text = "Test Line Errors" - pt.X.Label.Text = "X Axis" - pt.Y.Min = 0 - pt.Y.Max = 100 - pt.Y.Label.Text = "Y Axis" - - data := make(plot.XYs, 21) - for i := range data { - x := float32(i % 21) - data[i].X = x * 5 - data[i].Y = float32(50) + 40*math32.Sin((x/8)*math32.Pi) - } - - yerr := make(YErrors, 21) - for i := range yerr { - x := float32(i % 21) - yerr[i].High = float32(5) + 4*math32.Cos((x/8)*math32.Pi) - yerr[i].Low = -yerr[i].High - } - - xyerr := XYErr{XYs: data, YErrors: yerr} - - l1, err := NewLine(data) - if err != nil { - t.Error(err.Error()) - } - pt.Add(l1) - pt.Legend.Add("Sine", l1) - - l2, err := NewYErrorBars(xyerr) - if err != nil { - t.Error(err.Error()) - } - pt.Add(l2) - - pt.Resize(image.Point{640, 480}) - pt.Draw() - imagex.Assert(t, pt.Pixels, "errbar.png") -} diff --git a/plot/plots/scatter.go b/plot/plots/scatter.go deleted file mode 100644 index bfd5cdf754..0000000000 --- a/plot/plots/scatter.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted from github.com/gonum/plot: -// Copyright Ā©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package plots - -import ( - "cogentcore.org/core/math32" - "cogentcore.org/core/plot" - "cogentcore.org/core/styles/units" -) - -// Scatter implements the Plotter interface, drawing -// a shape for each point. -type Scatter struct { - // XYs is a copy of the points for this scatter. - plot.XYs - - // PXYs is the actual plotting coordinates for each XY value. - PXYs plot.XYs - - // size of shape to draw for each point - PointSize units.Value - - // shape to draw for each point - PointShape Shapes - - // LineStyle is the style of the line connecting the points. - // Use zero width to disable lines. - LineStyle plot.LineStyle -} - -// NewScatter returns a Scatter that uses the -// default glyph style. -func NewScatter(xys plot.XYer) (*Scatter, error) { - data, err := plot.CopyXYs(xys) - if err != nil { - return nil, err - } - sc := &Scatter{XYs: data} - sc.LineStyle.Defaults() - sc.PointSize.Pt(4) - return sc, nil -} - -func (pts *Scatter) XYData() (data plot.XYer, pixels plot.XYer) { - data = pts.XYs - pixels = pts.PXYs - return -} - -// Plot draws the Line, implementing the plot.Plotter interface. -func (pts *Scatter) Plot(plt *plot.Plot) { - pc := plt.Paint - if !pts.LineStyle.SetStroke(plt) { - return - } - pts.PointSize.ToDots(&pc.UnitContext) - pc.FillStyle.Color = pts.LineStyle.Color - ps := plot.PlotXYs(plt, pts.XYs) - for i := range ps { - pt := ps[i] - DrawShape(pc, math32.Vec2(pt.X, pt.Y), pts.PointSize.Dots, pts.PointShape) - } - pc.FillStyle.Color = nil -} - -// DataRange returns the minimum and maximum -// x and y values, implementing the plot.DataRanger interface. -func (pts *Scatter) DataRange(plt *plot.Plot) (xmin, xmax, ymin, ymax float32) { - return plot.XYRange(pts) -} - -// Thumbnail the thumbnail for the Scatter, -// implementing the plot.Thumbnailer interface. -func (pts *Scatter) Thumbnail(plt *plot.Plot) { - if !pts.LineStyle.SetStroke(plt) { - return - } - pc := plt.Paint - pts.PointSize.ToDots(&pc.UnitContext) - pc.FillStyle.Color = pts.LineStyle.Color - ptb := pc.Bounds - midX := 0.5 * float32(ptb.Min.X+ptb.Max.X) - midY := 0.5 * float32(ptb.Min.Y+ptb.Max.Y) - - DrawShape(pc, math32.Vec2(midX, midY), pts.PointSize.Dots, pts.PointShape) -} diff --git a/plot/plots/shapes.go b/plot/plots/shapes.go deleted file mode 100644 index 3d6252343d..0000000000 --- a/plot/plots/shapes.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package plots - -import ( - "cogentcore.org/core/math32" - "cogentcore.org/core/paint" -) - -type Shapes int32 //enums:enum - -const ( - // Ring is the outline of a circle - Ring Shapes = iota - - // Circle is a solid circle - Circle - - // Square is the outline of a square - Square - - // Box is a filled square - Box - - // Triangle is the outline of a triangle - Triangle - - // Pyramid is a filled triangle - Pyramid - - // Plus is a plus sign - Plus - - // Cross is a big X - Cross -) - -// DrawShape draws the given shape -func DrawShape(pc *paint.Context, pos math32.Vector2, size float32, shape Shapes) { - switch shape { - case Ring: - DrawRing(pc, pos, size) - case Circle: - DrawCircle(pc, pos, size) - case Square: - DrawSquare(pc, pos, size) - case Box: - DrawBox(pc, pos, size) - case Triangle: - DrawTriangle(pc, pos, size) - case Pyramid: - DrawPyramid(pc, pos, size) - case Plus: - DrawPlus(pc, pos, size) - case Cross: - DrawCross(pc, pos, size) - } -} - -func DrawRing(pc *paint.Context, pos math32.Vector2, size float32) { - pc.DrawCircle(pos.X, pos.Y, size) - pc.Stroke() -} - -func DrawCircle(pc *paint.Context, pos math32.Vector2, size float32) { - pc.DrawCircle(pos.X, pos.Y, size) - pc.FillStrokeClear() -} - -func DrawSquare(pc *paint.Context, pos math32.Vector2, size float32) { - x := size * 0.9 - pc.MoveTo(pos.X-x, pos.Y-x) - pc.LineTo(pos.X+x, pos.Y-x) - pc.LineTo(pos.X+x, pos.Y+x) - pc.LineTo(pos.X-x, pos.Y+x) - pc.ClosePath() - pc.Stroke() -} - -func DrawBox(pc *paint.Context, pos math32.Vector2, size float32) { - x := size * 0.9 - pc.MoveTo(pos.X-x, pos.Y-x) - pc.LineTo(pos.X+x, pos.Y-x) - pc.LineTo(pos.X+x, pos.Y+x) - pc.LineTo(pos.X-x, pos.Y+x) - pc.ClosePath() - pc.FillStrokeClear() -} - -func DrawTriangle(pc *paint.Context, pos math32.Vector2, size float32) { - x := size * 0.9 - pc.MoveTo(pos.X, pos.Y-x) - pc.LineTo(pos.X-x, pos.Y+x) - pc.LineTo(pos.X+x, pos.Y+x) - pc.ClosePath() - pc.Stroke() -} - -func DrawPyramid(pc *paint.Context, pos math32.Vector2, size float32) { - x := size * 0.9 - pc.MoveTo(pos.X, pos.Y-x) - pc.LineTo(pos.X-x, pos.Y+x) - pc.LineTo(pos.X+x, pos.Y+x) - pc.ClosePath() - pc.FillStrokeClear() -} - -func DrawPlus(pc *paint.Context, pos math32.Vector2, size float32) { - x := size * 1.05 - pc.MoveTo(pos.X-x, pos.Y) - pc.LineTo(pos.X+x, pos.Y) - pc.MoveTo(pos.X, pos.Y-x) - pc.LineTo(pos.X, pos.Y+x) - pc.ClosePath() - pc.Stroke() -} - -func DrawCross(pc *paint.Context, pos math32.Vector2, size float32) { - x := size * 0.9 - pc.MoveTo(pos.X-x, pos.Y-x) - pc.LineTo(pos.X+x, pos.Y+x) - pc.MoveTo(pos.X+x, pos.Y-x) - pc.LineTo(pos.X-x, pos.Y+x) - pc.ClosePath() - pc.Stroke() -} diff --git a/plot/plots/table.go b/plot/plots/table.go deleted file mode 100644 index 41644ce7a6..0000000000 --- a/plot/plots/table.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plots - -import "cogentcore.org/core/plot" - -// Table is an interface for tabular data for plotting, -// with columns of values. 
-type Table interface { - // number of columns of data - NumColumns() int - - // name of given column - ColumnName(i int) string - - // number of rows of data - NumRows() int - - // PlotData returns the data value at given column and row - PlotData(column, row int) float32 -} - -func TableColumnIndex(tab Table, name string) int { - for i := range tab.NumColumns() { - if tab.ColumnName(i) == name { - return i - } - } - return -1 -} - -// TableXYer is an interface for providing XY access to Table data -type TableXYer struct { - Table Table - - // the indexes of the tensor columns to use for the X and Y data, respectively - XColumn, YColumn int -} - -func NewTableXYer(tab Table, xcolumn, ycolumn int) *TableXYer { - txy := &TableXYer{Table: tab, XColumn: xcolumn, YColumn: ycolumn} - return txy -} - -func (dt *TableXYer) Len() int { - return dt.Table.NumRows() -} - -func (dt *TableXYer) XY(i int) (x, y float32) { - return dt.Table.PlotData(dt.XColumn, i), dt.Table.PlotData(dt.YColumn, i) -} - -// AddTableLine adds Line with given x, y columns from given tabular data -func AddTableLine(plt *plot.Plot, tab Table, xcolumn, ycolumn int) (*Line, error) { - txy := NewTableXYer(tab, xcolumn, ycolumn) - ln, err := NewLine(txy) - if err != nil { - return nil, err - } - plt.Add(ln) - return ln, nil -} - -// AddTableLinePoints adds Line w/ Points with given x, y columns from given tabular data -func AddTableLinePoints(plt *plot.Plot, tab Table, xcolumn, ycolumn int) (*Line, *Scatter, error) { - txy := &TableXYer{Table: tab, XColumn: xcolumn, YColumn: ycolumn} - ln, sc, err := NewLinePoints(txy) - if err != nil { - return nil, nil, err - } - plt.Add(ln) - plt.Add(sc) - return ln, sc, nil -} diff --git a/plot/plotter.go b/plot/plotter.go deleted file mode 100644 index a656a3f808..0000000000 --- a/plot/plotter.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plot - -// Plotter is an interface that wraps the Plot method. -// Some standard implementations of Plotter can be found in plotters. -type Plotter interface { - // Plot draws the data to the Plot Paint - Plot(pt *Plot) - - // returns the data for this plot as X,Y points, - // including corresponding pixel data. - // This allows gui interface to inspect data etc. - XYData() (data XYer, pixels XYer) -} - -// DataRanger wraps the DataRange method. -type DataRanger interface { - // DataRange returns the range of X and Y values. - DataRange(pt *Plot) (xmin, xmax, ymin, ymax float32) -} diff --git a/plot/text.go b/plot/text.go deleted file mode 100644 index cb86017a65..0000000000 --- a/plot/text.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plot - -import ( - "cogentcore.org/core/colors" - "cogentcore.org/core/math32" - "cogentcore.org/core/paint" - "cogentcore.org/core/styles" - "cogentcore.org/core/styles/units" -) - -// DefaultFontFamily specifies a default font for plotting. -// if not set, the standard Cogent Core default font is used. 
-var DefaultFontFamily = "" - -// TextStyle specifies styling parameters for Text elements -type TextStyle struct { - styles.FontRender - - // how to align text along the relevant dimension for the text element - Align styles.Aligns - - // Padding is used in a case-dependent manner to add space around text elements - Padding units.Value - - // rotation of the text, in Degrees - Rotation float32 -} - -func (ts *TextStyle) Defaults() { - ts.FontRender.Defaults() - ts.Color = colors.Scheme.OnSurface - ts.Align = styles.Center - if DefaultFontFamily != "" { - ts.FontRender.Family = DefaultFontFamily - } -} - -func (ts *TextStyle) openFont(pt *Plot) { - if ts.Font.Face == nil { - paint.OpenFont(&ts.FontRender, &pt.Paint.UnitContext) // calls SetUnContext after updating metrics - } -} - -func (ts *TextStyle) ToDots(uc *units.Context) { - ts.FontRender.ToDots(uc) - ts.Padding.ToDots(uc) -} - -// Text specifies a single text element in a plot -type Text struct { - - // text string, which can use HTML formatting - Text string - - // styling for this text element - Style TextStyle - - // PaintText is the [paint.Text] for the text. - PaintText paint.Text -} - -func (tx *Text) Defaults() { - tx.Style.Defaults() -} - -// config is called during the layout of the plot, prior to drawing -func (tx *Text) Config(pt *Plot) { - uc := &pt.Paint.UnitContext - fs := &tx.Style.FontRender - if math32.Abs(tx.Style.Rotation) > 10 { - tx.Style.Align = styles.End - } - fs.ToDots(uc) - tx.Style.Padding.ToDots(uc) - txln := float32(len(tx.Text)) - fht := fs.Size.Dots - hsz := float32(12) * txln - txs := &pt.StandardTextStyle - tx.PaintText.SetHTML(tx.Text, fs, txs, uc, nil) - tx.PaintText.Layout(txs, fs, uc, math32.Vector2{X: hsz, Y: fht}) - if tx.Style.Rotation != 0 { - rotx := math32.Rotate2D(math32.DegToRad(tx.Style.Rotation)) - tx.PaintText.Transform(rotx, fs, uc) - } -} - -// PosX returns the starting position for a horizontally-aligned text element, -// based on given width. Text must have been config'd already. -func (tx *Text) PosX(width float32) math32.Vector2 { - pos := math32.Vector2{} - pos.X = styles.AlignFactor(tx.Style.Align) * width - switch tx.Style.Align { - case styles.Center: - pos.X -= 0.5 * tx.PaintText.BBox.Size().X - case styles.End: - pos.X -= tx.PaintText.BBox.Size().X - } - if math32.Abs(tx.Style.Rotation) > 10 { - pos.Y += 0.5 * tx.PaintText.BBox.Size().Y - } - return pos -} - -// PosY returns the starting position for a vertically-rotated text element, -// based on given height. Text must have been config'd already. -func (tx *Text) PosY(height float32) math32.Vector2 { - pos := math32.Vector2{} - pos.Y = styles.AlignFactor(tx.Style.Align) * height - switch tx.Style.Align { - case styles.Center: - pos.Y -= 0.5 * tx.PaintText.BBox.Size().Y - case styles.End: - pos.Y -= tx.PaintText.BBox.Size().Y - } - return pos -} - -// Draw renders the text at given upper left position -func (tx *Text) Draw(pt *Plot, pos math32.Vector2) { - tx.PaintText.Render(pt.Paint, pos) -} diff --git a/plot/tick.go b/plot/tick.go deleted file mode 100644 index 8045c2ac49..0000000000 --- a/plot/tick.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plot - -import ( - "strconv" - "time" - - "cogentcore.org/core/math32" -) - -// A Tick is a single tick mark on an axis. -type Tick struct { - // Value is the data value marked by this Tick. 
- Value float32 - - // Label is the text to display at the tick mark. - // If Label is an empty string then this is a minor tick mark. - Label string -} - -// IsMinor returns true if this is a minor tick mark. -func (tk *Tick) IsMinor() bool { - return tk.Label == "" -} - -// Ticker creates Ticks in a specified range -type Ticker interface { - // Ticks returns Ticks in a specified range - Ticks(min, max float32) []Tick -} - -// DefaultTicks is suitable for the Ticker field of an Axis, -// it returns a reasonable default set of tick marks. -type DefaultTicks struct{} - -var _ Ticker = DefaultTicks{} - -// Ticks returns Ticks in the specified range. -func (DefaultTicks) Ticks(min, max float32) []Tick { - if max <= min { - panic("illegal range") - } - - const suggestedTicks = 3 - - labels, step, q, mag := talbotLinHanrahan(min, max, suggestedTicks, withinData, nil, nil, nil) - majorDelta := step * math32.Pow10(mag) - if q == 0 { - // Simple fall back was chosen, so - // majorDelta is the label distance. - majorDelta = labels[1] - labels[0] - } - - // Choose a reasonable, but ad - // hoc formatting for labels. - fc := byte('f') - var off int - if mag < -1 || 6 < mag { - off = 1 - fc = 'g' - } - if math32.Trunc(q) != q { - off += 2 - } - prec := minInt(6, maxInt(off, -mag)) - ticks := make([]Tick, len(labels)) - for i, v := range labels { - ticks[i] = Tick{Value: v, Label: strconv.FormatFloat(float64(v), fc, prec, 32)} - } - - var minorDelta float32 - // See talbotLinHanrahan for the values used here. - switch step { - case 1, 2.5: - minorDelta = majorDelta / 5 - case 2, 3, 4, 5: - minorDelta = majorDelta / step - default: - if majorDelta/2 < dlamchP { - return ticks - } - minorDelta = majorDelta / 2 - } - - // Find the first minor tick not greater - // than the lowest data value. - var i float32 - for labels[0]+(i-1)*minorDelta > min { - i-- - } - // Add ticks at minorDelta intervals when - // they are not within minorDelta/2 of a - // labelled tick. - for { - val := labels[0] + i*minorDelta - if val > max { - break - } - found := false - for _, t := range ticks { - if math32.Abs(t.Value-val) < minorDelta/2 { - found = true - } - } - if !found { - ticks = append(ticks, Tick{Value: val}) - } - i++ - } - - return ticks -} - -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} - -// LogTicks is suitable for the Ticker field of an Axis, -// it returns tick marks suitable for a log-scale axis. -type LogTicks struct { - // Prec specifies the precision of tick rendering - // according to the documentation for strconv.FormatFloat. - Prec int -} - -var _ Ticker = LogTicks{} - -// Ticks returns Ticks in a specified range -func (t LogTicks) Ticks(min, max float32) []Tick { - if min <= 0 || max <= 0 { - panic("Values must be greater than 0 for a log scale.") - } - - val := math32.Pow10(int(math32.Log10(min))) - max = math32.Pow10(int(math32.Ceil(math32.Log10(max)))) - var ticks []Tick - for val < max { - for i := 1; i < 10; i++ { - if i == 1 { - ticks = append(ticks, Tick{Value: val, Label: formatFloatTick(val, t.Prec)}) - } - ticks = append(ticks, Tick{Value: val * float32(i)}) - } - val *= 10 - } - ticks = append(ticks, Tick{Value: val, Label: formatFloatTick(val, t.Prec)}) - - return ticks -} - -// ConstantTicks is suitable for the Ticker field of an Axis. -// This function returns the given set of ticks. 
-type ConstantTicks []Tick - -var _ Ticker = ConstantTicks{} - -// Ticks returns Ticks in a specified range -func (ts ConstantTicks) Ticks(float32, float32) []Tick { - return ts -} - -// UnixTimeIn returns a time conversion function for the given location. -func UnixTimeIn(loc *time.Location) func(t float32) time.Time { - return func(t float32) time.Time { - return time.Unix(int64(t), 0).In(loc) - } -} - -// UTCUnixTime is the default time conversion for TimeTicks. -var UTCUnixTime = UnixTimeIn(time.UTC) - -// TimeTicks is suitable for axes representing time values. -type TimeTicks struct { - // Ticker is used to generate a set of ticks. - // If nil, DefaultTicks will be used. - Ticker Ticker - - // Format is the textual representation of the time value. - // If empty, time.RFC3339 will be used - Format string - - // Time takes a float32 value and converts it into a time.Time. - // If nil, UTCUnixTime is used. - Time func(t float32) time.Time -} - -var _ Ticker = TimeTicks{} - -// Ticks implements plot.Ticker. -func (t TimeTicks) Ticks(min, max float32) []Tick { - if t.Ticker == nil { - t.Ticker = DefaultTicks{} - } - if t.Format == "" { - t.Format = time.RFC3339 - } - if t.Time == nil { - t.Time = UTCUnixTime - } - - ticks := t.Ticker.Ticks(min, max) - for i := range ticks { - tick := &ticks[i] - if tick.Label == "" { - continue - } - tick.Label = t.Time(tick.Value).Format(t.Format) - } - return ticks -} - -/* -// lengthOffset returns an offset that should be added to the -// tick mark's line to accout for its length. I.e., the start of -// the line for a minor tick mark must be shifted by half of -// the length. -func (t Tick) lengthOffset(len vg.Length) vg.Length { - if t.IsMinor() { - return len / 2 - } - return 0 -} - -// tickLabelHeight returns height of the tick mark labels. -func tickLabelHeight(sty text.Style, ticks []Tick) vg.Length { - maxHeight := vg.Length(0) - for _, t := range ticks { - if t.IsMinor() { - continue - } - r := sty.Rectangle(t.Label) - h := r.Max.Y - r.Min.Y - if h > maxHeight { - maxHeight = h - } - } - return maxHeight -} - -// tickLabelWidth returns the width of the widest tick mark label. -func tickLabelWidth(sty text.Style, ticks []Tick) vg.Length { - maxWidth := vg.Length(0) - for _, t := range ticks { - if t.IsMinor() { - continue - } - r := sty.Rectangle(t.Label) - w := r.Max.X - r.Min.X - if w > maxWidth { - maxWidth = w - } - } - return maxWidth -} -*/ - -// formatFloatTick returns a g-formated string representation of v -// to the specified precision. -func formatFloatTick(v float32, prec int) string { - return strconv.FormatFloat(float64(v), 'g', prec, 32) -} - -// TickerFunc is suitable for the Ticker field of an Axis. -// It is an adapter which allows to quickly setup a Ticker using a function with an appropriate signature. -type TickerFunc func(min, max float32) []Tick - -var _ Ticker = TickerFunc(nil) - -// Ticks implements plot.Ticker. -func (f TickerFunc) Ticks(min, max float32) []Tick { - return f(min, max) -} diff --git a/plot/typegen.go b/plot/typegen.go deleted file mode 100644 index cc812fd88e..0000000000 --- a/plot/typegen.go +++ /dev/null @@ -1,77 +0,0 @@ -// Code generated by "core generate -add-types"; DO NOT EDIT. 
- -package plot - -import ( - "cogentcore.org/core/types" -) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.Normalizer", IDName: "normalizer", Doc: "Normalizer rescales values from the data coordinate system to the\nnormalized coordinate system.", Methods: []types.Method{{Name: "Normalize", Doc: "Normalize transforms a value x in the data coordinate system to\nthe normalized coordinate system.", Args: []string{"min", "max", "x"}, Returns: []string{"float32"}}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.Axis", IDName: "axis", Doc: "Axis represents either a horizontal or vertical\naxis of a plot.", Fields: []types.Field{{Name: "Min", Doc: "Min and Max are the minimum and maximum data\nvalues represented by the axis."}, {Name: "Max", Doc: "Min and Max are the minimum and maximum data\nvalues represented by the axis."}, {Name: "Axis", Doc: "specifies which axis this is: X or Y"}, {Name: "Label", Doc: "Label for the axis"}, {Name: "Line", Doc: "Line styling properties for the axis line."}, {Name: "Padding", Doc: "Padding between the axis line and the data. Having\nnon-zero padding ensures that the data is never drawn\non the axis, thus making it easier to see."}, {Name: "TickText", Doc: "has the text style for rendering tick labels, and is shared for actual rendering"}, {Name: "TickLine", Doc: "line style for drawing tick lines"}, {Name: "TickLength", Doc: "length of tick lines"}, {Name: "Ticker", Doc: "Ticker generates the tick marks. Any tick marks\nreturned by the Marker function that are not in\nrange of the axis are not drawn."}, {Name: "Scale", Doc: "Scale transforms a value given in the data coordinate system\nto the normalized coordinate system of the axisā€”its distance\nalong the axis as a fraction of the axis range."}, {Name: "AutoRescale", Doc: "AutoRescale enables an axis to automatically adapt its minimum\nand maximum boundaries, according to its underlying Ticker."}, {Name: "ticks", Doc: "cached list of ticks, set in size"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.LinearScale", IDName: "linear-scale", Doc: "LinearScale an be used as the value of an Axis.Scale function to\nset the axis to a standard linear scale."}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.LogScale", IDName: "log-scale", Doc: "LogScale can be used as the value of an Axis.Scale function to\nset the axis to a log scale."}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.InvertedScale", IDName: "inverted-scale", Doc: "InvertedScale can be used as the value of an Axis.Scale function to\ninvert the axis using any Normalizer.", Embeds: []types.Field{{Name: "Normalizer"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.Valuer", IDName: "valuer", Doc: "Valuer provides an interface for a list of scalar values", Methods: []types.Method{{Name: "Len", Doc: "Len returns the number of values.", Returns: []string{"int"}}, {Name: "Value", Doc: "Value returns a value.", Args: []string{"i"}, Returns: []string{"float32"}}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.Values", IDName: "values", Doc: "Values implements the Valuer interface."}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.XYer", IDName: "x-yer", Doc: "XYer provides an interface for a list of X,Y data pairs", Methods: []types.Method{{Name: "Len", Doc: "Len returns the number of x, y pairs.", Returns: []string{"int"}}, {Name: "XY", Doc: "XY returns an x, y pair.", Args: 
[]string{"i"}, Returns: []string{"x", "y"}}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.XYs", IDName: "x-ys", Doc: "XYs implements the XYer interface."}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.XValues", IDName: "x-values", Doc: "XValues implements the Valuer interface,\nreturning the x value from an XYer.", Embeds: []types.Field{{Name: "XYer"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.YValues", IDName: "y-values", Doc: "YValues implements the Valuer interface,\nreturning the y value from an XYer.", Embeds: []types.Field{{Name: "XYer"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.XYZer", IDName: "xy-zer", Doc: "XYZer provides an interface for a list of X,Y,Z data triples.\nIt also satisfies the XYer interface for the X,Y pairs.", Methods: []types.Method{{Name: "Len", Doc: "Len returns the number of x, y, z triples.", Returns: []string{"int"}}, {Name: "XYZ", Doc: "XYZ returns an x, y, z triple.", Args: []string{"i"}, Returns: []string{"float32", "float32", "float32"}}, {Name: "XY", Doc: "XY returns an x, y pair.", Args: []string{"i"}, Returns: []string{"float32", "float32"}}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.XYZs", IDName: "xy-zs", Doc: "XYZs implements the XYZer interface using a slice."}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.XYZ", IDName: "xyz", Doc: "XYZ is an x, y and z value.", Fields: []types.Field{{Name: "X"}, {Name: "Y"}, {Name: "Z"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.XYValues", IDName: "xy-values", Doc: "XYValues implements the XYer interface, returning\nthe x and y values from an XYZer.", Embeds: []types.Field{{Name: "XYZer"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.Labeler", IDName: "labeler", Doc: "Labeler provides an interface for a list of string labels", Methods: []types.Method{{Name: "Label", Doc: "Label returns a label.", Args: []string{"i"}, Returns: []string{"string"}}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.selection", IDName: "selection", Fields: []types.Field{{Name: "n", Doc: "n is the number of labels selected."}, {Name: "lMin", Doc: "lMin and lMax are the selected min\nand max label values. lq is the q\nchosen."}, {Name: "lMax", Doc: "lMin and lMax are the selected min\nand max label values. lq is the q\nchosen."}, {Name: "lStep", Doc: "lMin and lMax are the selected min\nand max label values. lq is the q\nchosen."}, {Name: "lq", Doc: "lMin and lMax are the selected min\nand max label values. 
lq is the q\nchosen."}, {Name: "score", Doc: "score is the score for the selection."}, {Name: "magnitude", Doc: "magnitude is the magnitude of the\nlabel step distance."}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.weights", IDName: "weights", Doc: "weights is a helper type to calcuate the labelling scheme's total score.", Fields: []types.Field{{Name: "simplicity"}, {Name: "coverage"}, {Name: "density"}, {Name: "legibility"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.LegendPosition", IDName: "legend-position", Doc: "LegendPosition specifies where to put the legend", Fields: []types.Field{{Name: "Top", Doc: "Top and Left specify the location of the legend."}, {Name: "Left", Doc: "Top and Left specify the location of the legend."}, {Name: "XOffs", Doc: "XOffs and YOffs are added to the legend's final position,\nrelative to the relevant anchor position"}, {Name: "YOffs", Doc: "XOffs and YOffs are added to the legend's final position,\nrelative to the relevant anchor position"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.Legend", IDName: "legend", Doc: "A Legend gives a description of the meaning of different\ndata elements of the plot. Each legend entry has a name\nand a thumbnail, where the thumbnail shows a small\nsample of the display style of the corresponding data.", Fields: []types.Field{{Name: "TextStyle", Doc: "TextStyle is the style given to the legend entry texts."}, {Name: "Position", Doc: "position of the legend"}, {Name: "ThumbnailWidth", Doc: "ThumbnailWidth is the width of legend thumbnails."}, {Name: "Fill", Doc: "Fill specifies the background fill color for the legend box,\nif non-nil."}, {Name: "Entries", Doc: "Entries are all of the LegendEntries described by this legend."}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.Thumbnailer", IDName: "thumbnailer", Doc: "Thumbnailer wraps the Thumbnail method, which\ndraws the small image in a legend representing the\nstyle of data.", Methods: []types.Method{{Name: "Thumbnail", Doc: "Thumbnail draws an thumbnail representing\na legend entry. The thumbnail will usually show\na smaller representation of the style used\nto plot the corresponding data.", Args: []string{"pt"}}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.LegendEntry", IDName: "legend-entry", Doc: "A LegendEntry represents a single line of a legend, it\nhas a name and an icon.", Fields: []types.Field{{Name: "Text", Doc: "text is the text associated with this entry."}, {Name: "Thumbs", Doc: "thumbs is a slice of all of the thumbnails styles"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.LineStyle", IDName: "line-style", Doc: "LineStyle has style properties for line drawing", Fields: []types.Field{{Name: "Color", Doc: "stroke color image specification; stroking is off if nil"}, {Name: "Width", Doc: "line width"}, {Name: "Dashes", Doc: "Dashes are the dashes of the stroke. Each pair of values specifies\nthe amount to paint and then the amount to skip."}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.Plot", IDName: "plot", Doc: "Plot is the basic type representing a plot.\nIt renders into its own image.RGBA Pixels image,\nand can also save a corresponding SVG version.\nThe Axis ranges are updated automatically when plots\nare added, so setting a fixed range should happen\nafter that point. 
See [UpdateRange] method as well.", Fields: []types.Field{{Name: "Title", Doc: "Title of the plot"}, {Name: "Background", Doc: "Background is the background of the plot.\nThe default is [colors.Scheme.Surface]."}, {Name: "StandardTextStyle", Doc: "standard text style with default options"}, {Name: "X", Doc: "X and Y are the horizontal and vertical axes\nof the plot respectively."}, {Name: "Y", Doc: "X and Y are the horizontal and vertical axes\nof the plot respectively."}, {Name: "Legend", Doc: "Legend is the plot's legend."}, {Name: "Plotters", Doc: "plotters are drawn by calling their Plot method\nafter the axes are drawn."}, {Name: "Size", Doc: "size is the target size of the image to render to"}, {Name: "DPI", Doc: "DPI is the dots per inch for rendering the image.\nLarger numbers result in larger scaling of the plot contents\nwhich is strongly recommended for print (e.g., use 300 for print)"}, {Name: "Paint", Doc: "painter for rendering"}, {Name: "Pixels", Doc: "pixels that we render into"}, {Name: "PlotBox", Doc: "Current plot bounding box in image coordinates, for plotting coordinates"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.Plotter", IDName: "plotter", Doc: "Plotter is an interface that wraps the Plot method.\nSome standard implementations of Plotter can be found in plotters.", Methods: []types.Method{{Name: "Plot", Doc: "Plot draws the data to the Plot Paint", Args: []string{"pt"}}, {Name: "XYData", Doc: "returns the data for this plot as X,Y points,\nincluding corresponding pixel data.\nThis allows gui interface to inspect data etc.", Returns: []string{"data", "pixels"}}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.DataRanger", IDName: "data-ranger", Doc: "DataRanger wraps the DataRange method.", Methods: []types.Method{{Name: "DataRange", Doc: "DataRange returns the range of X and Y values.", Returns: []string{"xmin", "xmax", "ymin", "ymax"}}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.TextStyle", IDName: "text-style", Doc: "TextStyle specifies styling parameters for Text elements", Embeds: []types.Field{{Name: "FontRender"}}, Fields: []types.Field{{Name: "Align", Doc: "how to align text along the relevant dimension for the text element"}, {Name: "Padding", Doc: "Padding is used in a case-dependent manner to add space around text elements"}, {Name: "Rotation", Doc: "rotation of the text, in Degrees"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.Text", IDName: "text", Doc: "Text specifies a single text element in a plot", Fields: []types.Field{{Name: "Text", Doc: "text string, which can use HTML formatting"}, {Name: "Style", Doc: "styling for this text element"}, {Name: "PaintText", Doc: "PaintText is the [paint.Text] for the text."}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.Tick", IDName: "tick", Doc: "A Tick is a single tick mark on an axis.", Fields: []types.Field{{Name: "Value", Doc: "Value is the data value marked by this Tick."}, {Name: "Label", Doc: "Label is the text to display at the tick mark.\nIf Label is an empty string then this is a minor tick mark."}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.Ticker", IDName: "ticker", Doc: "Ticker creates Ticks in a specified range", Methods: []types.Method{{Name: "Ticks", Doc: "Ticks returns Ticks in a specified range", Args: []string{"min", "max"}, Returns: []string{"Tick"}}}}) - -var _ = types.AddType(&types.Type{Name: 
"cogentcore.org/core/plot.DefaultTicks", IDName: "default-ticks", Doc: "DefaultTicks is suitable for the Ticker field of an Axis,\nit returns a reasonable default set of tick marks."}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.LogTicks", IDName: "log-ticks", Doc: "LogTicks is suitable for the Ticker field of an Axis,\nit returns tick marks suitable for a log-scale axis.", Fields: []types.Field{{Name: "Prec", Doc: "Prec specifies the precision of tick rendering\naccording to the documentation for strconv.FormatFloat."}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.ConstantTicks", IDName: "constant-ticks", Doc: "ConstantTicks is suitable for the Ticker field of an Axis.\nThis function returns the given set of ticks."}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.TimeTicks", IDName: "time-ticks", Doc: "TimeTicks is suitable for axes representing time values.", Fields: []types.Field{{Name: "Ticker", Doc: "Ticker is used to generate a set of ticks.\nIf nil, DefaultTicks will be used."}, {Name: "Format", Doc: "Format is the textual representation of the time value.\nIf empty, time.RFC3339 will be used"}, {Name: "Time", Doc: "Time takes a float32 value and converts it into a time.Time.\nIf nil, UTCUnixTime is used."}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/plot.TickerFunc", IDName: "ticker-func", Doc: "TickerFunc is suitable for the Ticker field of an Axis.\nIt is an adapter which allows to quickly setup a Ticker using a function with an appropriate signature."}) diff --git a/shell/README.md b/shell/README.md deleted file mode 100644 index c66364d59c..0000000000 --- a/shell/README.md +++ /dev/null @@ -1,190 +0,0 @@ -# Cogent Shell (cosh) - -Cogent Shell (cosh) is a shell that combines the best parts of Go and command-based shell languages like `bash` to provide an integrated shell experience that allows you to easily run terminal commands while using Go for complicated logic. This allows you to write concise, elegant, and readable shell code that runs quickly on all platforms, and transpiles to Go (i.e, can be compiled by `go build`). - -The simple idea is that each line is either Go or shell commands, determined in a fairly intuitive way mostly by the content at the start of the line (formal rules below), and they can be intermixed by wrapping Go within `{ }` and shell code from within backticks (\`). We henceforth refer to shell code as `exec` code (in reference to the Go & Cogent `exec` package that we use to execute programs), given the potential ambituity of the entire `cosh` language being the shell. There are different syntactic formatting rules for these two domains of Go and Exec, within cosh: - -* Go code is processed and formatted as usual (e.g., white space is irrelevant, etc). -* Exec code is space separated, like normal command-line invocations. - -Examples: - -```go -for i, f := range cosh.SplitLines(`ls -la`) { // `ls` executes returns string - echo {i} {strings.ToLower(f)} // {} surrounds go within shell -} -``` - -`shell.SplitLines` is a function that runs `strings.Split(arg, "\n")`, defined in the cosh standard library of such frequently-used helper functions. 
- -You can easily perform handy duration and data size formatting: - -```go -22010706 * time.Nanosecond // 22.010706ms -datasize.Size(44610930) // 42.5 MB -``` - -# Special syntax - -## Multiple statements per line - -* Multiple statements can be combined on one line, separated by `;` as in regular Go and shell languages. Critically, the language determination for the first statement determines the language for the remaining statements; you cannot intermix the two on one line when using `;`. -# Exec mode - -## Environment variables - -* `set VAR VALUE` (space delimited as in all exec mode, no equals) - -## Output redirection - -* Standard output redirect: `>` and `>&` (and `|`, `|&` if needed) - -## Control flow - -* Any error stops the script execution, except for statements wrapped in `[ ]`, indicating an "optional" statement, e.g.: - -```sh -cd some; [mkdir sub]; cd sub -``` - -* `&` at the end of a statement runs in the background (as in bash) -- otherwise it waits until it completes before it continues. - -* `jobs`, `fg`, `bg`, and `kill` builtin commands function as in standard bash. - -## Exec functions (aliases) - -Use the `command` keyword to define new functions for Exec mode execution, which can then be used like any other command, for example: - -```sh -command list { - ls -la args... -} -``` - -```sh -cd data -list *.tsv -``` - -The `command` is transpiled into a Go function that takes `args ...string`. In the command function body, you can use the `args...` expression to pass all of the args, or `args[1]` etc to refer to specific positional indexes, as usual. - -The command function name is registered so that the standard shell execution code can run the function, passing the args. You can also call it directly from Go code using the standard parentheses expression. - -# Script Files and Makefile-like functionality - -As with most scripting languages, a file of cosh code can be made directly executable by adding a "shebang" expression at the start of the file: - -```sh -#!/usr/bin/env cosh -``` - -When executed this way, any additional args are available via an `args []any` variable, which can be passed to a command as follows: -```go -install {args...} -``` -or by referring to specific arg indexes etc. - -To make a script behave like a standard Makefile, you can define different `command`s for each of the make commands, and then add the following at the end of the file to use the args to run commands: - -```go -shell.RunCommands(args) -``` - -See [make](cmd/cosh/testdata/make) for an example in `cmd/cosh/testdata/make`, which can be run, for example, using: - -```sh -./make build -``` - -Note that there is nothing special about the name `make` here, so this can be done with any file. - -The `make` package defines a number of useful utility functions that accomplish the standard dependency and file timestamp checking functionality from the standard `make` command, as in the [magefile](https://magefile.org/dependencies/) system. Note that the cosh direct exec command syntax makes the resulting make files much closer to a standard bash-like Makefile, while still having all the benefits of Go control and expressions, compared to magefile. - -TODO: implement and document above. - -# SSH connections to remote hosts - -Any number of active SSH connections can be maintained and used dynamically within a script, including simple ways of copying data among the different hosts (including the local host).
The Go mode execution is always on the local host in one running process, and only the shell commands are executed remotely, enabling a unique ability to easily coordinate and distribute processing and data across various hosts. - -Each host maintains its own working directory and environment variables, which can be configured and re-used by default whenever using a given host. - -* `cossh hostname.org [name]` establishes a connection, using the given optional name to refer to this connection. If the name is not provided, a sequential number will be used, starting with 1, with 0 referring always to the local host. - -* `@name` then refers to the given host in all subsequent commands, with `@0` referring to the local host where the cosh script is running. - -### Explicit per-command specification of host - -```sh -@name cd subdir; ls -``` - -### Default host - -```sh -@name // or: -cossh @name -``` - -uses the given host for all subsequent commands (unless explicitly specified), until the default is changed. Use `cossh @0` to return to localhost. - -### Redirect input / output among hosts - -The output of a remote host command can be sent to a file on the local host: -```sh -@name cat hostfile.tsv > @0:localfile.tsv -``` -Note the use of the `:` colon delimiter after the host name here. TODO: You cannot send output to a remote host file (e.g., `> @host:remotefile.tsv`) -- maybe with sftp? - -The output of any command can also be piped to a remote host as its standard input: -```sh -ls *.tsv | @host cat > files.txt -``` - -### scp to copy files easily - -The builtin `scp` function allows easy copying of files across hosts, using the persistent connections established with `cossh` instead of creating new connections as in the standard scp command. - -`scp` is _always_ run from the local host, with the remote host filename specified as `@name:remotefile`. - -```sh -scp @name:hostfile.tsv localfile.tsv -``` - -TODO: Importantly, file wildcard globbing works as expected: -```sh -scp @name:*.tsv @0:data/ -``` - -and entire directories can be copied, as in `cp -a` or `cp -r` (this behavior is automatic and does not require a flag). - -### Close connections - -```sh -cossh close -``` - -This will close all active connections and return the default host to @0. All active connections are also automatically closed when the shell terminates. - -# Other Utilities - -** need a replacement for findnm -- very powerful but garbage.. - -# Rules for Go vs. Shell determination - -The critical extension from standard Go syntax is for lines that are processed by the `Exec` functions, used for running arbitrary programs on the user's executable path. Here are the rules (word = IDENT token): - -* Backticks "``" anywhere: Exec. Returns a `string`. -* Within Exec, `{}`: Go -* Line starts with `Go` Keyword: Go -* Line is one word: Exec -* Line starts with `path`: Exec -* Line starts with `"string"`: Exec -* Line starts with `word word`: Exec -* Line starts with `word {`: Exec -* Otherwise: Go - -# TODO: - -* likewise, need to run everything effectively as a bg job with our own explicit Wait, which we can then communicate with to move from fg to bg. - - diff --git a/shell/builtins.go b/shell/builtins.go deleted file mode 100644 index 941c24cd53..0000000000 --- a/shell/builtins.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
- -package shell - -import ( - "context" - "fmt" - "log/slog" - "os" - "path/filepath" - "strconv" - "strings" - - "cogentcore.org/core/base/exec" - "cogentcore.org/core/base/logx" - "cogentcore.org/core/base/sshclient" - "github.com/mitchellh/go-homedir" -) - -// InstallBuiltins adds the builtin shell commands to [Shell.Builtins]. -func (sh *Shell) InstallBuiltins() { - sh.Builtins = make(map[string]func(cmdIO *exec.CmdIO, args ...string) error) - sh.Builtins["cd"] = sh.Cd - sh.Builtins["exit"] = sh.Exit - sh.Builtins["jobs"] = sh.JobsCmd - sh.Builtins["kill"] = sh.Kill - sh.Builtins["set"] = sh.Set - sh.Builtins["add-path"] = sh.AddPath - sh.Builtins["which"] = sh.Which - sh.Builtins["source"] = sh.Source - sh.Builtins["cossh"] = sh.CoSSH - sh.Builtins["scp"] = sh.Scp - sh.Builtins["debug"] = sh.Debug - sh.Builtins["history"] = sh.History -} - -// Cd changes the current directory. -func (sh *Shell) Cd(cmdIO *exec.CmdIO, args ...string) error { - if len(args) > 1 { - return fmt.Errorf("no more than one argument can be passed to cd") - } - dir := "" - if len(args) == 1 { - dir = args[0] - } - dir, err := homedir.Expand(dir) - if err != nil { - return err - } - if dir == "" { - dir, err = homedir.Dir() - if err != nil { - return err - } - } - dir, err = filepath.Abs(dir) - if err != nil { - return err - } - err = os.Chdir(dir) - if err != nil { - return err - } - sh.Config.Dir = dir - return nil -} - -// Exit exits the shell. -func (sh *Shell) Exit(cmdIO *exec.CmdIO, args ...string) error { - os.Exit(0) - return nil -} - -// Set sets the given environment variable to the given value. -func (sh *Shell) Set(cmdIO *exec.CmdIO, args ...string) error { - if len(args) != 2 { - return fmt.Errorf("expected two arguments, got %d", len(args)) - } - return os.Setenv(args[0], args[1]) -} - -// JobsCmd is the builtin jobs command -func (sh *Shell) JobsCmd(cmdIO *exec.CmdIO, args ...string) error { - for i, jb := range sh.Jobs { - cmdIO.Printf("[%d] %s\n", i+1, jb.String()) - } - return nil -} - -// Kill kills a job by job number or PID. -// Just expands the job id expressions %n into PIDs and calls system kill. -func (sh *Shell) Kill(cmdIO *exec.CmdIO, args ...string) error { - if len(args) == 0 { - return fmt.Errorf("cosh kill: expected at least one argument") - } - sh.JobIDExpand(args) - sh.Config.RunIO(cmdIO, "kill", args...) - return nil -} - -// Fg foregrounds a job by job number -func (sh *Shell) Fg(cmdIO *exec.CmdIO, args ...string) error { - if len(args) != 1 { - return fmt.Errorf("cosh fg: requires exactly one job id argument") - } - jid := args[0] - exp := sh.JobIDExpand(args) - if exp != 1 { - return fmt.Errorf("cosh fg: argument was not a job id in the form %%n") - } - jno, _ := strconv.Atoi(jid[1:]) // guaranteed good - job := sh.Jobs[jno] - cmdIO.Printf("foregrounding job [%d]\n", jno) - _ = job - // todo: the problem here is we need to change the stdio for running job - // job.Cmd.Wait() // wait - // * probably need to have wrapper StdIO for every exec so we can flexibly redirect for fg, bg commands. - // * likewise, need to run everything effectively as a bg job with our own explicit Wait, which we can then communicate with to move from fg to bg. - - return nil -} - -// AddPath adds the given path(s) to $PATH. 
-func (sh *Shell) AddPath(cmdIO *exec.CmdIO, args ...string) error { - if len(args) == 0 { - return fmt.Errorf("cosh add-path expected at least one argument") - } - path := os.Getenv("PATH") - for _, arg := range args { - arg, err := homedir.Expand(arg) - if err != nil { - return err - } - path = path + ":" + arg - } - return os.Setenv("PATH", path) -} - -// Which reports the executable associated with the given command. -// Processes builtins and commands, and if not found, then passes on -// to exec which. -func (sh *Shell) Which(cmdIO *exec.CmdIO, args ...string) error { - if len(args) != 1 { - return fmt.Errorf("cosh which: requires one argument") - } - cmd := args[0] - if _, hasCmd := sh.Commands[cmd]; hasCmd { - cmdIO.Println(cmd, "is a user-defined command") - return nil - } - if _, hasBlt := sh.Builtins[cmd]; hasBlt { - cmdIO.Println(cmd, "is a cosh builtin command") - return nil - } - sh.Config.RunIO(cmdIO, "which", args...) - return nil -} - -// Source loads and evaluates the given file(s) -func (sh *Shell) Source(cmdIO *exec.CmdIO, args ...string) error { - if len(args) == 0 { - return fmt.Errorf("cosh source: requires at least one argument") - } - for _, fn := range args { - sh.TranspileCodeFromFile(fn) - } - // note that we do not execute the file -- just loads it in - return nil -} - -// CoSSH manages SSH connections, which are referenced by the @name -// identifier. It handles the following cases: -// - @name -- switches to using given host for all subsequent commands -// - host [name] -- connects to a server specified in first arg and switches -// to using it, with optional name instead of default sequential number. -// - close -- closes all open connections, or the specified one -func (sh *Shell) CoSSH(cmdIO *exec.CmdIO, args ...string) error { - if len(args) < 1 { - return fmt.Errorf("cossh: requires at least one argument") - } - cmd := args[0] - var err error - host := "" - name := fmt.Sprintf("%d", 1+len(sh.SSHClients)) - con := false - switch { - case cmd == "close": - sh.CloseSSH() - return nil - case cmd == "@" && len(args) == 2: - name = args[1] - case len(args) == 2: - con = true - host = args[0] - name = args[1] - default: - con = true - host = args[0] - } - if con { - cl := sshclient.NewClient(sh.SSH) - err = cl.Connect(host) - if err != nil { - return err - } - sh.SSHClients[name] = cl - sh.SSHActive = name - } else { - if name == "0" { - sh.SSHActive = "" - } else { - sh.SSHActive = name - cl := sh.ActiveSSH() - if cl == nil { - err = fmt.Errorf("cosh: ssh connection named: %q not found", name) - } - } - } - return err -} - -// Scp performs file copy over SSH connection, with the remote filename -// prefixed with the @name: and the local filename un-prefixed. -// The order is from -> to, as in standard cp. -// The remote filename is automatically relative to the current working -// directory on the remote host. 
-func (sh *Shell) Scp(cmdIO *exec.CmdIO, args ...string) error { - if len(args) != 2 { - return fmt.Errorf("scp: requires exactly two arguments") - } - var lfn, hfn string - toHost := false - if args[0][0] == '@' { - hfn = args[0] - lfn = args[1] - } else if args[1][0] == '@' { - hfn = args[1] - lfn = args[0] - toHost = true - } else { - return fmt.Errorf("scp: one of the files must a remote host filename, specified by @name:") - } - - ci := strings.Index(hfn, ":") - if ci < 0 { - return fmt.Errorf("scp: remote host filename does not contain a : after the host name") - } - host := hfn[1:ci] - hfn = hfn[ci+1:] - - cl, err := sh.SSHByHost(host) - if err != nil { - return err - } - - ctx := sh.Ctx - if ctx == nil { - ctx = context.Background() - } - - if toHost { - err = cl.CopyLocalFileToHost(ctx, lfn, hfn) - } else { - err = cl.CopyHostToLocalFile(ctx, hfn, lfn) - } - return err -} - -// Debug changes log level -func (sh *Shell) Debug(cmdIO *exec.CmdIO, args ...string) error { - if len(args) == 0 { - if logx.UserLevel == slog.LevelDebug { - logx.UserLevel = slog.LevelInfo - } else { - logx.UserLevel = slog.LevelDebug - } - } - if len(args) == 1 { - lev := args[0] - if lev == "on" || lev == "true" || lev == "1" { - logx.UserLevel = slog.LevelDebug - } else { - logx.UserLevel = slog.LevelInfo - } - } - return nil -} - -// History shows history -func (sh *Shell) History(cmdIO *exec.CmdIO, args ...string) error { - n := len(sh.Hist) - nh := n - if len(args) == 1 { - an, err := strconv.Atoi(args[0]) - if err != nil { - return fmt.Errorf("history: error parsing number of history items: %q, error: %s", args[0], err.Error()) - } - nh = min(n, an) - } else if len(args) > 1 { - return fmt.Errorf("history: uses at most one argument") - } - for i := n - nh; i < n; i++ { - cmdIO.Printf("%d:\t%s\n", i, sh.Hist[i]) - } - return nil -} diff --git a/shell/cmd/cosh/cfg.cosh b/shell/cmd/cosh/cfg.cosh deleted file mode 100644 index d29e45757f..0000000000 --- a/shell/cmd/cosh/cfg.cosh +++ /dev/null @@ -1,8 +0,0 @@ -set ANDROID_HOME ~/Library/Android/sdk - -add-path ~/go/bin ~/bin /usr/local/bin /opt/homebrew/bin /opt/homebrew/sbin /opt/homebrew/opt/openjdk/bin $ANDROID_HOME/tools $ANDROID_HOME/platform-tools /Library/TeX/texbin - -command list { - ls -la -} - diff --git a/shell/cmd/cosh/cosh.go b/shell/cmd/cosh/cosh.go deleted file mode 100644 index 9ef4b3cd2d..0000000000 --- a/shell/cmd/cosh/cosh.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Command cosh is an interactive cli for running and compiling Cogent Shell (cosh). -package main - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/base/fsx" - "cogentcore.org/core/cli" - "cogentcore.org/core/shell" - "cogentcore.org/core/shell/interpreter" - "github.com/cogentcore/yaegi/interp" -) - -//go:generate core generate -add-types -add-funcs - -// Config is the configuration information for the cosh cli. -type Config struct { - - // Input is the input file to run/compile. - // If this is provided as the first argument, - // then the program will exit after running, - // unless the Interactive mode is flagged. 
- Input string `posarg:"0" required:"-"` - - // Expr is an optional expression to evaluate, which can be used - // in addition to the Input file to run, to execute commands - // defined within that file for example, or as a command to run - // prior to starting interactive mode if no Input is specified. - Expr string `flag:"e,expr"` - - // Args is an optional list of arguments to pass in the run command. - // These arguments will be turned into an "args" local variable in the shell. - // These are automatically processed from any leftover arguments passed, so - // you should not need to specify this flag manually. - Args []string `cmd:"run" posarg:"leftover" required:"-"` - - // Interactive runs the interactive command line after processing any input file. - // Interactive mode is the default mode for the run command unless an input file - // is specified. - Interactive bool `cmd:"run" flag:"i,interactive"` -} - -func main() { //types:skip - opts := cli.DefaultOptions("cosh", "An interactive tool for running and compiling Cogent Shell (cosh).") - cli.Run(opts, &Config{}, Run, Build) -} - -// Run runs the specified cosh file. If no file is specified, -// it runs an interactive shell that allows the user to input cosh. -func Run(c *Config) error { //cli:cmd -root - in := interpreter.NewInterpreter(interp.Options{}) - in.Config() - if len(c.Args) > 0 { - in.Eval("args := cosh.StringsToAnys(" + fmt.Sprintf("%#v)", c.Args)) - } - - if c.Input == "" { - return Interactive(c, in) - } - code := "" - if errors.Log1(fsx.FileExists(c.Input)) { - b, err := os.ReadFile(c.Input) - if err != nil && c.Expr == "" { - return err - } - code = string(b) - } - if c.Expr != "" { - if code != "" { - code += "\n" - } - code += c.Expr + "\n" - } - - _, _, err := in.Eval(code) - if err == nil { - err = in.Shell.DepthError() - } - if c.Interactive { - return Interactive(c, in) - } - return err -} - -// Interactive runs an interactive shell that allows the user to input cosh. -func Interactive(c *Config, in *interpreter.Interpreter) error { - if c.Expr != "" { - in.Eval(c.Expr) - } - in.Interactive() - return nil -} - -// Build builds the specified input cosh file, or all .cosh files in the current -// directory if no input is specified, to corresponding .go file name(s). -// If the file does not already contain a "package" specification, then -// "package main; func main()..." wrappers are added, which allows the same -// code to be used in interactive and Go compiled modes. -func Build(c *Config) error { - var fns []string - if c.Input != "" { - fns = []string{c.Input} - } else { - fns = fsx.Filenames(".", ".cosh") - } - var errs []error - for _, fn := range fns { - ofn := strings.TrimSuffix(fn, filepath.Ext(fn)) + ".go" - err := shell.NewShell().TranspileFile(fn, ofn) - if err != nil { - errs = append(errs, err) - } - } - return errors.Join(errs...) -} diff --git a/shell/cmd/cosh/test.cosh b/shell/cmd/cosh/test.cosh deleted file mode 100644 index dda7a55b21..0000000000 --- a/shell/cmd/cosh/test.cosh +++ /dev/null @@ -1,8 +0,0 @@ -// test file for cosh cli - -// todo: doesn't work: #1152 -echo {args} - -for i, fn := range cosh.SplitLines(`/bin/ls -1`) { - fmt.Println(i, fn) -} diff --git a/shell/cmd/cosh/testdata/make b/shell/cmd/cosh/testdata/make deleted file mode 100755 index a47bd26d3f..0000000000 --- a/shell/cmd/cosh/testdata/make +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env cosh -// test makefile for cosh. 
-// example usage: -// ./make build - -command build { - println("running the build command") -} - -command test { - println("running the test command") -} - -shell.RunCommands(args) - diff --git a/shell/cmd/cosh/typegen.go b/shell/cmd/cosh/typegen.go deleted file mode 100644 index 9dcf995c36..0000000000 --- a/shell/cmd/cosh/typegen.go +++ /dev/null @@ -1,15 +0,0 @@ -// Code generated by "core generate -add-types -add-funcs"; DO NOT EDIT. - -package main - -import ( - "cogentcore.org/core/types" -) - -var _ = types.AddType(&types.Type{Name: "main.Config", IDName: "config", Doc: "Config is the configuration information for the cosh cli.", Directives: []types.Directive{{Tool: "go", Directive: "generate", Args: []string{"core", "generate", "-add-types", "-add-funcs"}}}, Fields: []types.Field{{Name: "Input", Doc: "Input is the input file to run/compile.\nIf this is provided as the first argument,\nthen the program will exit after running,\nunless the Interactive mode is flagged."}, {Name: "Expr", Doc: "Expr is an optional expression to evaluate, which can be used\nin addition to the Input file to run, to execute commands\ndefined within that file for example, or as a command to run\nprior to starting interactive mode if no Input is specified."}, {Name: "Args", Doc: "Args is an optional list of arguments to pass in the run command.\nThese arguments will be turned into an \"args\" local variable in the shell.\nThese are automatically processed from any leftover arguments passed, so\nyou should not need to specify this flag manually."}, {Name: "Interactive", Doc: "Interactive runs the interactive command line after processing any input file.\nInteractive mode is the default mode for the run command unless an input file\nis specified."}}}) - -var _ = types.AddFunc(&types.Func{Name: "main.Run", Doc: "Run runs the specified cosh file. If no file is specified,\nit runs an interactive shell that allows the user to input cosh.", Directives: []types.Directive{{Tool: "cli", Directive: "cmd", Args: []string{"-root"}}}, Args: []string{"c"}, Returns: []string{"error"}}) - -var _ = types.AddFunc(&types.Func{Name: "main.Interactive", Doc: "Interactive runs an interactive shell that allows the user to input cosh.", Args: []string{"c", "in"}, Returns: []string{"error"}}) - -var _ = types.AddFunc(&types.Func{Name: "main.Build", Doc: "Build builds the specified input cosh file, or all .cosh files in the current\ndirectory if no input is specified, to corresponding .go file name(s).\nIf the file does not already contain a \"package\" specification, then\n\"package main; func main()...\" wrappers are added, which allows the same\ncode to be used in interactive and Go compiled modes.", Args: []string{"c"}, Returns: []string{"error"}}) diff --git a/shell/complete.go b/shell/complete.go deleted file mode 100644 index bf79e0bf92..0000000000 --- a/shell/complete.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package shell - -import ( - "os" - "path/filepath" - "strings" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/icons" - "cogentcore.org/core/parse/complete" - "github.com/mitchellh/go-homedir" -) - -// CompleteMatch is the [complete.MatchFunc] for the shell. 
-func (sh *Shell) CompleteMatch(data any, text string, posLine, posChar int) (md complete.Matches) { - comps := complete.Completions{} - text = text[:posChar] - md.Seed = complete.SeedPath(text) - fullPath := complete.SeedSpace(text) - fullPath = errors.Log1(homedir.Expand(fullPath)) - parent := strings.TrimSuffix(fullPath, md.Seed) - dir := filepath.Join(sh.Config.Dir, parent) - if filepath.IsAbs(parent) { - dir = parent - } - entries := errors.Log1(os.ReadDir(dir)) - for _, entry := range entries { - icon := icons.File - if entry.IsDir() { - icon = icons.Folder - } - name := strings.ReplaceAll(entry.Name(), " ", `\ `) // escape spaces - comps = append(comps, complete.Completion{ - Text: name, - Icon: icon, - Desc: filepath.Join(sh.Config.Dir, name), - }) - } - if parent == "" { - for cmd := range sh.Builtins { - comps = append(comps, complete.Completion{ - Text: cmd, - Icon: icons.Terminal, - Desc: "Builtin command: " + cmd, - }) - } - for cmd := range sh.Commands { - comps = append(comps, complete.Completion{ - Text: cmd, - Icon: icons.Terminal, - Desc: "Command: " + cmd, - }) - } - // todo: write something that looks up all files on path -- should cache that per - // path string setting - } - md.Matches = complete.MatchSeedCompletion(comps, md.Seed) - return md -} - -// CompleteEdit is the [complete.EditFunc] for the shell. -func (sh *Shell) CompleteEdit(data any, text string, cursorPos int, completion complete.Completion, seed string) (ed complete.Edit) { - return complete.EditWord(text, cursorPos, completion.Text, seed) -} - -// ReadlineCompleter implements [github.com/ergochat/readline.AutoCompleter]. -type ReadlineCompleter struct { - Shell *Shell -} - -func (rc *ReadlineCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) { - text := string(line) - md := rc.Shell.CompleteMatch(nil, text, 0, pos) - res := [][]rune{} - for _, match := range md.Matches { - after := strings.TrimPrefix(match.Text, md.Seed) - if md.Seed != "" && after == match.Text { - continue // no overlap - } - if match.Icon == icons.Folder { - after += string(filepath.Separator) - } else { - after += " " - } - res = append(res, []rune(after)) - } - return res, len(md.Seed) -} diff --git a/shell/cosh/coshlib.go b/shell/cosh/coshlib.go deleted file mode 100644 index ddd1499063..0000000000 --- a/shell/cosh/coshlib.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cosh defines convenient utility functions for -// use in the cosh shell, available with the cosh prefix. -package cosh - -import ( - "io/fs" - "os" - "path/filepath" - "strings" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/base/fsx" - "cogentcore.org/core/base/slicesx" - "cogentcore.org/core/base/stringsx" -) - -// SplitLines returns a slice of given string split by lines -// with any extra whitespace trimmed for each line entry. -func SplitLines(s string) []string { - sl := stringsx.SplitLines(s) - for i, s := range sl { - sl[i] = strings.TrimSpace(s) - } - return sl -} - -// FileExists returns true if given file exists -func FileExists(path string) bool { - ex := errors.Log1(fsx.FileExists(path)) - return ex -} - -// WriteFile writes string to given file with standard permissions, -// logging any errors. 
-func WriteFile(filename, str string) error { - err := os.WriteFile(filename, []byte(str), 0666) - if err != nil { - errors.Log(err) - } - return err -} - -// ReadFile reads the string from the given file, logging any errors. -func ReadFile(filename string) string { - str, err := os.ReadFile(filename) - if err != nil { - errors.Log(err) - } - return string(str) -} - -// ReplaceInFile replaces all occurrences of given string with replacement -// in given file, rewriting the file. Also returns the updated string. -func ReplaceInFile(filename, old, new string) string { - str := ReadFile(filename) - str = strings.ReplaceAll(str, old, new) - WriteFile(filename, str) - return str -} - -// StringsToAnys converts a slice of strings to a slice of any, -// using slicesx.ToAny. The interpreter cannot process generics -// yet, so this wrapper is needed. Use for passing args to -// a command, for example. -func StringsToAnys(s []string) []any { - return slicesx.As[string, any](s) -} - -// AllFiles returns a list of all files (excluding directories) -// under the given path. -func AllFiles(path string) []string { - var files []string - filepath.WalkDir(path, func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err - } - if d.IsDir() { - return nil - } - files = append(files, path) - return nil - }) - return files -} diff --git a/shell/exec.go b/shell/exec.go deleted file mode 100644 index e79cb151cc..0000000000 --- a/shell/exec.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package shell - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "slices" - "strings" - - "cogentcore.org/core/base/exec" - "cogentcore.org/core/base/reflectx" - "cogentcore.org/core/base/sshclient" - "github.com/mitchellh/go-homedir" -) - -// Exec handles command execution for all cases, parameterized by the args. -// It executes the given command string, waiting for the command to finish, -// handling the given arguments appropriately. -// If there is any error, it adds it to the shell, and triggers CancelExecution. -// - errOk = don't call AddError so execution will not stop on error -// - start = calls Start on the command, which then runs asynchronously, with -// a goroutine forked to Wait for it and close its IO -// - output = return the output of the command as a string (otherwise return is "") -func (sh *Shell) Exec(errOk, start, output bool, cmd any, args ...any) string { - out := "" - if !errOk && len(sh.Errors) > 0 { - return out - } - cmdIO := exec.NewCmdIO(&sh.Config) - cmdIO.StackStart() - if start { - cmdIO.PushIn(nil) // no stdin for bg - } - cl, scmd, sargs := sh.ExecArgs(cmdIO, errOk, cmd, args...) - if scmd == "" { - return out - } - var err error - if cl != nil { - switch { - case start: - err = cl.Start(&cmdIO.StdIOState, scmd, sargs...) - case output: - cmdIO.PushOut(nil) - out, err = cl.Output(&cmdIO.StdIOState, scmd, sargs...) - default: - err = cl.Run(&cmdIO.StdIOState, scmd, sargs...) - } - if !errOk { - sh.AddError(err) - } - } else { - ran := false - ran, out = sh.RunBuiltinOrCommand(cmdIO, errOk, output, scmd, sargs...) - if !ran { - sh.isCommand.Push(false) - switch { - case start: - err = sh.Config.StartIO(cmdIO, scmd, sargs...) 
- sh.Jobs.Push(cmdIO) - go func() { - if !cmdIO.OutIsPipe() { - fmt.Printf("[%d] %s\n", len(sh.Jobs), cmdIO.String()) - } - cmdIO.Cmd.Wait() - cmdIO.PopToStart() - sh.DeleteJob(cmdIO) - }() - case output: - cmdIO.PushOut(nil) - out, err = sh.Config.OutputIO(cmdIO, scmd, sargs...) - default: - err = sh.Config.RunIO(cmdIO, scmd, sargs...) - } - if !errOk { - sh.AddError(err) - } - sh.isCommand.Pop() - } - } - if !start { - cmdIO.PopToStart() - } - return out -} - -// RunBuiltinOrCommand runs a builtin or a command, returning true if it ran, -// and the output string if running in output mode. -func (sh *Shell) RunBuiltinOrCommand(cmdIO *exec.CmdIO, errOk, output bool, cmd string, args ...string) (bool, string) { - out := "" - cmdFun, hasCmd := sh.Commands[cmd] - bltFun, hasBlt := sh.Builtins[cmd] - - if !hasCmd && !hasBlt { - return false, out - } - - if hasCmd { - sh.commandArgs.Push(args) - sh.isCommand.Push(true) - } - - // note: we need to set both os. and wrapper versions, so it works the same - // in compiled vs. interpreted mode - oldsh := sh.Config.StdIO.Set(&cmdIO.StdIO) - oldwrap := sh.StdIOWrappers.SetWrappers(&cmdIO.StdIO) - oldstd := cmdIO.SetToOS() - if output { - obuf := &bytes.Buffer{} - // os.Stdout = obuf // needs a file - sh.Config.StdIO.Out = obuf - sh.StdIOWrappers.SetWrappedOut(obuf) - cmdIO.PushOut(obuf) - if hasCmd { - cmdFun(args...) - } else { - sh.AddError(bltFun(cmdIO, args...)) - } - out = strings.TrimSuffix(obuf.String(), "\n") - } else { - if hasCmd { - cmdFun(args...) - } else { - sh.AddError(bltFun(cmdIO, args...)) - } - } - - if hasCmd { - sh.isCommand.Pop() - sh.commandArgs.Pop() - } - oldstd.SetToOS() - sh.StdIOWrappers.SetWrappers(oldwrap) - sh.Config.StdIO = *oldsh - - return true, out -} - -func (sh *Shell) HandleArgErr(errok bool, err error) error { - if err == nil { - return err - } - if errok { - sh.Config.StdIO.ErrPrintln(err.Error()) - } else { - sh.AddError(err) - } - return err -} - -// ExecArgs processes the args to given exec command, -// handling all of the input / output redirection and -// file globbing, homedir expansion, etc. -func (sh *Shell) ExecArgs(cmdIO *exec.CmdIO, errOk bool, cmd any, args ...any) (*sshclient.Client, string, []string) { - if len(sh.Jobs) > 0 { - jb := sh.Jobs.Peek() - if jb.OutIsPipe() { - cmdIO.PushIn(jb.PipeIn.Peek()) - } - } - scmd := reflectx.ToString(cmd) - cl := sh.ActiveSSH() - isCmd := sh.isCommand.Peek() - sargs := make([]string, 0, len(args)) - var err error - for _, a := range args { - s := reflectx.ToString(a) - if s == "" { - continue - } - if cl == nil { - s, err = homedir.Expand(s) - sh.HandleArgErr(errOk, err) - // note: handling globbing in a later pass, to not clutter.. 
- } else { - if s[0] == '~' { - s = "$HOME/" + s[1:] - } - } - sargs = append(sargs, s) - } - if scmd[0] == '@' { - newHost := "" - if scmd == "@0" { // local - cl = nil - } else { - hnm := scmd[1:] - if scl, ok := sh.SSHClients[hnm]; ok { - newHost = hnm - cl = scl - } else { - sh.HandleArgErr(errOk, fmt.Errorf("cosh: ssh connection named: %q not found", hnm)) - } - } - if len(sargs) > 0 { - scmd = sargs[0] - sargs = sargs[1:] - } else { // just a ssh switch - sh.SSHActive = newHost - return nil, "", nil - } - } - for i := 0; i < len(sargs); i++ { // we modify so no range - s := sargs[i] - switch { - case s[0] == '>': - sargs = sh.OutToFile(cl, cmdIO, errOk, sargs, i) - case s[0] == '|': - sargs = sh.OutToPipe(cl, cmdIO, errOk, sargs, i) - case cl == nil && isCmd && strings.HasPrefix(s, "args"): - sargs = sh.CmdArgs(errOk, sargs, i) - i-- // back up because we consume this one - } - } - // do globbing late here so we don't have to wade through everything. - // only for local. - if cl == nil { - gargs := make([]string, 0, len(sargs)) - for _, s := range sargs { - g, err := filepath.Glob(s) - if err != nil || len(g) == 0 { // not valid - gargs = append(gargs, s) - } else { - gargs = append(gargs, g...) - } - } - sargs = gargs - } - return cl, scmd, sargs -} - -// OutToFile processes the > arg that sends output to a file -func (sh *Shell) OutToFile(cl *sshclient.Client, cmdIO *exec.CmdIO, errOk bool, sargs []string, i int) []string { - n := len(sargs) - s := sargs[i] - sn := len(s) - fn := "" - narg := 1 - if i < n-1 { - fn = sargs[i+1] - narg = 2 - } - appn := false - errf := false - switch { - case sn > 1 && s[1] == '>': - appn = true - if sn > 2 && s[2] == '&' { - errf = true - } - case sn > 1 && s[1] == '&': - errf = true - case sn > 1: - fn = s[1:] - narg = 1 - } - if fn == "" { - sh.HandleArgErr(errOk, fmt.Errorf("cosh: no output file specified")) - return sargs - } - if cl != nil { - if !strings.HasPrefix(fn, "@0:") { - return sargs - } - fn = fn[3:] - } - sargs = slices.Delete(sargs, i, i+narg) - // todo: process @n: expressions here -- if @0 then it is the same - // if @1, then need to launch an ssh "cat >[>] file" with pipe from command as stdin - var f *os.File - var err error - if appn { - f, err = os.OpenFile(fn, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - } else { - f, err = os.Create(fn) - } - if err == nil { - cmdIO.PushOut(f) - if errf { - cmdIO.PushErr(f) - } - } else { - sh.HandleArgErr(errOk, err) - } - return sargs -} - -// OutToPipe processes the | arg that sends output to a pipe -func (sh *Shell) OutToPipe(cl *sshclient.Client, cmdIO *exec.CmdIO, errOk bool, sargs []string, i int) []string { - s := sargs[i] - sn := len(s) - errf := false - if sn > 1 && s[1] == '&' { - errf = true - } - // todo: what to do here? - sargs = slices.Delete(sargs, i, i+1) - cmdIO.PushOutPipe() - if errf { - cmdIO.PushErr(cmdIO.Out) - } - // sh.HandleArgErr(errok, err) - return sargs -} - -// CmdArgs processes expressions involving "args" for commands -func (sh *Shell) CmdArgs(errOk bool, sargs []string, i int) []string { - // n := len(sargs) - // s := sargs[i] - // sn := len(s) - args := sh.commandArgs.Peek() - - // fmt.Println("command args:", args) - - switch { - case sargs[i] == "args...": - sargs = slices.Delete(sargs, i, i+1) - sargs = slices.Insert(sargs, i, args...) - } - - return sargs -} - -// CancelExecution calls the Cancel() function if set. 
-func (sh *Shell) CancelExecution() { - if sh.Cancel != nil { - sh.Cancel() - } -} diff --git a/shell/exec_test.go b/shell/exec_test.go deleted file mode 100644 index 657bf0275d..0000000000 --- a/shell/exec_test.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package shell - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestExec(t *testing.T) { - assert.Equal(t, "hi", NewShell().Output("echo", "hi")) -} diff --git a/shell/execwords.go b/shell/execwords.go deleted file mode 100644 index e5515195fe..0000000000 --- a/shell/execwords.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package shell - -import ( - "fmt" - "strings" - "unicode" -) - -func ExecWords(ln string) ([]string, error) { - ln = strings.TrimSpace(ln) - n := len(ln) - if n == 0 { - return nil, nil - } - - word := "" - esc := false - dQuote := false - bQuote := false - brace := 0 - brack := 0 - redir := false - - var words []string - addWord := func() { - if brace > 0 { // always accum into one token inside brace - return - } - if len(word) > 0 { - words = append(words, word) - word = "" - } - } - - atStart := true - sbrack := (ln[0] == '[') - if sbrack { - word = "[" - addWord() - brack++ - ln = ln[1:] - atStart = false - } - - for _, r := range ln { - quote := dQuote || bQuote - - if redir { - redir = false - if r == '&' { - word += string(r) - addWord() - continue - } - if r == '>' { - word += string(r) - redir = true - continue - } - addWord() - } - - switch { - case esc: - if brace == 0 && unicode.IsSpace(r) { // we will be quoted later anyway - word = word[:len(word)-1] - } - word += string(r) - esc = false - case r == '\\': - esc = true - word += string(r) - case r == '"': - if !bQuote { - dQuote = !dQuote - } - word += string(r) - case r == '`': - if !dQuote { - bQuote = !bQuote - } - word += string(r) - case quote: // absorbs quote -- no need to check below - word += string(r) - case unicode.IsSpace(r): - addWord() - continue // don't reset at start - case r == '{': - if brace == 0 { - addWord() - word = "{" - addWord() - } - brace++ - case r == '}': - brace-- - if brace == 0 { - addWord() - word = "}" - addWord() - } - case r == '[': - word += string(r) - if atStart && brack == 0 { - sbrack = true - addWord() - } - brack++ - case r == ']': - brack-- - if brack == 0 && sbrack { // only point of tracking brack is to get this end guy - addWord() - word = "]" - addWord() - } else { - word += string(r) - } - case r == '<' || r == '>' || r == '|': - addWord() - word += string(r) - redir = true - case r == '&': // known to not be redir - addWord() - word += string(r) - case r == ';': - addWord() - word += string(r) - addWord() - atStart = true - continue // avoid reset - default: - word += string(r) - } - atStart = false - } - addWord() - if dQuote || bQuote || brack > 0 { - return words, fmt.Errorf("cosh: exec command has unterminated quotes (\": %v, `: %v) or brackets [ %v ]", dQuote, bQuote, brack > 0) - } - return words, nil -} - -// ExecWordIsCommand returns true if given exec word is a command-like string -// (excluding any paths) -func ExecWordIsCommand(f string) bool { - if strings.Contains(f, "(") || strings.Contains(f, "=") { - return false - } - return true -} diff --git 
a/shell/interpreter/cogentcore_org-core-base-datasize.go b/shell/interpreter/cogentcore_org-core-base-datasize.go deleted file mode 100644 index cafb889317..0000000000 --- a/shell/interpreter/cogentcore_org-core-base-datasize.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by 'yaegi extract cogentcore.org/core/base/datasize'. DO NOT EDIT. - -package interpreter - -import ( - "cogentcore.org/core/base/datasize" - "reflect" -) - -func init() { - Symbols["cogentcore.org/core/base/datasize/datasize"] = map[string]reflect.Value{ - // function, constant and variable definitions - "B": reflect.ValueOf(datasize.B), - "EB": reflect.ValueOf(datasize.EB), - "ErrBits": reflect.ValueOf(&datasize.ErrBits).Elem(), - "GB": reflect.ValueOf(datasize.GB), - "KB": reflect.ValueOf(datasize.KB), - "MB": reflect.ValueOf(datasize.MB), - "MustParse": reflect.ValueOf(datasize.MustParse), - "MustParseString": reflect.ValueOf(datasize.MustParseString), - "PB": reflect.ValueOf(datasize.PB), - "Parse": reflect.ValueOf(datasize.Parse), - "ParseString": reflect.ValueOf(datasize.ParseString), - "TB": reflect.ValueOf(datasize.TB), - - // type definitions - "Size": reflect.ValueOf((*datasize.Size)(nil)), - } -} diff --git a/shell/interpreter/cogentcore_org-core-base-elide.go b/shell/interpreter/cogentcore_org-core-base-elide.go deleted file mode 100644 index c2cff7e20f..0000000000 --- a/shell/interpreter/cogentcore_org-core-base-elide.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by 'yaegi extract cogentcore.org/core/base/elide'. DO NOT EDIT. - -package interpreter - -import ( - "cogentcore.org/core/base/elide" - "reflect" -) - -func init() { - Symbols["cogentcore.org/core/base/elide/elide"] = map[string]reflect.Value{ - // function, constant and variable definitions - "AppName": reflect.ValueOf(elide.AppName), - "End": reflect.ValueOf(elide.End), - "Middle": reflect.ValueOf(elide.Middle), - } -} diff --git a/shell/interpreter/cogentcore_org-core-base-errors.go b/shell/interpreter/cogentcore_org-core-base-errors.go deleted file mode 100644 index f595569dde..0000000000 --- a/shell/interpreter/cogentcore_org-core-base-errors.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by 'yaegi extract cogentcore.org/core/base/errors'. DO NOT EDIT. - -package interpreter - -import ( - "cogentcore.org/core/base/errors" - "github.com/cogentcore/yaegi/interp" - "reflect" -) - -func init() { - Symbols["cogentcore.org/core/base/errors/errors"] = map[string]reflect.Value{ - // function, constant and variable definitions - "As": reflect.ValueOf(errors.As), - "CallerInfo": reflect.ValueOf(errors.CallerInfo), - "ErrUnsupported": reflect.ValueOf(&errors.ErrUnsupported).Elem(), - "Is": reflect.ValueOf(errors.Is), - "Join": reflect.ValueOf(errors.Join), - "Log": reflect.ValueOf(errors.Log), - "Log1": reflect.ValueOf(interp.GenericFunc("func Log1[T any](v T, err error) T { //yaegi:add\n\tif err != nil {\n\t\tslog.Error(err.Error() + \" | \" + CallerInfo())\n\t}\n\treturn v\n}")), - "Must": reflect.ValueOf(errors.Must), - "New": reflect.ValueOf(errors.New), - "Unwrap": reflect.ValueOf(errors.Unwrap), - } -} diff --git a/shell/interpreter/cogentcore_org-core-base-fsx.go b/shell/interpreter/cogentcore_org-core-base-fsx.go deleted file mode 100644 index abc1208cb4..0000000000 --- a/shell/interpreter/cogentcore_org-core-base-fsx.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by 'yaegi extract cogentcore.org/core/base/fsx'. DO NOT EDIT. 
- -package interpreter - -import ( - "cogentcore.org/core/base/fsx" - "reflect" -) - -func init() { - Symbols["cogentcore.org/core/base/fsx/fsx"] = map[string]reflect.Value{ - // function, constant and variable definitions - "DirAndFile": reflect.ValueOf(fsx.DirAndFile), - "DirFS": reflect.ValueOf(fsx.DirFS), - "Dirs": reflect.ValueOf(fsx.Dirs), - "FileExists": reflect.ValueOf(fsx.FileExists), - "FileExistsFS": reflect.ValueOf(fsx.FileExistsFS), - "Filenames": reflect.ValueOf(fsx.Filenames), - "Files": reflect.ValueOf(fsx.Files), - "FindFilesOnPaths": reflect.ValueOf(fsx.FindFilesOnPaths), - "GoSrcDir": reflect.ValueOf(fsx.GoSrcDir), - "HasFile": reflect.ValueOf(fsx.HasFile), - "LatestMod": reflect.ValueOf(fsx.LatestMod), - "RelativeFilePath": reflect.ValueOf(fsx.RelativeFilePath), - "Sub": reflect.ValueOf(fsx.Sub), - } -} diff --git a/shell/interpreter/cogentcore_org-core-base-strcase.go b/shell/interpreter/cogentcore_org-core-base-strcase.go deleted file mode 100644 index 4c65387f68..0000000000 --- a/shell/interpreter/cogentcore_org-core-base-strcase.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by 'yaegi extract cogentcore.org/core/base/strcase'. DO NOT EDIT. - -package interpreter - -import ( - "cogentcore.org/core/base/strcase" - "reflect" -) - -func init() { - Symbols["cogentcore.org/core/base/strcase/strcase"] = map[string]reflect.Value{ - // function, constant and variable definitions - "CamelCase": reflect.ValueOf(strcase.CamelCase), - "CasesN": reflect.ValueOf(strcase.CasesN), - "CasesValues": reflect.ValueOf(strcase.CasesValues), - "FormatList": reflect.ValueOf(strcase.FormatList), - "KEBABCase": reflect.ValueOf(strcase.KEBABCase), - "KebabCase": reflect.ValueOf(strcase.KebabCase), - "LowerCamelCase": reflect.ValueOf(strcase.LowerCamelCase), - "LowerCase": reflect.ValueOf(strcase.LowerCase), - "Noop": reflect.ValueOf(strcase.Noop), - "SNAKECase": reflect.ValueOf(strcase.SNAKECase), - "SentenceCase": reflect.ValueOf(strcase.SentenceCase), - "Skip": reflect.ValueOf(strcase.Skip), - "SkipSplit": reflect.ValueOf(strcase.SkipSplit), - "SnakeCase": reflect.ValueOf(strcase.SnakeCase), - "Split": reflect.ValueOf(strcase.Split), - "TitleCase": reflect.ValueOf(strcase.TitleCase), - "To": reflect.ValueOf(strcase.To), - "ToCamel": reflect.ValueOf(strcase.ToCamel), - "ToKEBAB": reflect.ValueOf(strcase.ToKEBAB), - "ToKebab": reflect.ValueOf(strcase.ToKebab), - "ToLowerCamel": reflect.ValueOf(strcase.ToLowerCamel), - "ToSNAKE": reflect.ValueOf(strcase.ToSNAKE), - "ToSentence": reflect.ValueOf(strcase.ToSentence), - "ToSnake": reflect.ValueOf(strcase.ToSnake), - "ToTitle": reflect.ValueOf(strcase.ToTitle), - "ToWordCase": reflect.ValueOf(strcase.ToWordCase), - "UpperCase": reflect.ValueOf(strcase.UpperCase), - "WordCamelCase": reflect.ValueOf(strcase.WordCamelCase), - "WordCasesN": reflect.ValueOf(strcase.WordCasesN), - "WordCasesValues": reflect.ValueOf(strcase.WordCasesValues), - "WordLowerCase": reflect.ValueOf(strcase.WordLowerCase), - "WordOriginal": reflect.ValueOf(strcase.WordOriginal), - "WordSentenceCase": reflect.ValueOf(strcase.WordSentenceCase), - "WordTitleCase": reflect.ValueOf(strcase.WordTitleCase), - "WordUpperCase": reflect.ValueOf(strcase.WordUpperCase), - - // type definitions - "Cases": reflect.ValueOf((*strcase.Cases)(nil)), - "SplitAction": reflect.ValueOf((*strcase.SplitAction)(nil)), - "WordCases": reflect.ValueOf((*strcase.WordCases)(nil)), - } -} diff --git a/shell/interpreter/cogentcore_org-core-base-stringsx.go 
b/shell/interpreter/cogentcore_org-core-base-stringsx.go deleted file mode 100644 index f4f77cf0e5..0000000000 --- a/shell/interpreter/cogentcore_org-core-base-stringsx.go +++ /dev/null @@ -1,19 +0,0 @@ -// Code generated by 'yaegi extract cogentcore.org/core/base/stringsx'. DO NOT EDIT. - -package interpreter - -import ( - "cogentcore.org/core/base/stringsx" - "reflect" -) - -func init() { - Symbols["cogentcore.org/core/base/stringsx/stringsx"] = map[string]reflect.Value{ - // function, constant and variable definitions - "ByteSplitLines": reflect.ValueOf(stringsx.ByteSplitLines), - "ByteTrimCR": reflect.ValueOf(stringsx.ByteTrimCR), - "InsertFirstUnique": reflect.ValueOf(stringsx.InsertFirstUnique), - "SplitLines": reflect.ValueOf(stringsx.SplitLines), - "TrimCR": reflect.ValueOf(stringsx.TrimCR), - } -} diff --git a/shell/interpreter/cogentcore_org-core-shell-cosh.go b/shell/interpreter/cogentcore_org-core-shell-cosh.go deleted file mode 100644 index 68bfce6a75..0000000000 --- a/shell/interpreter/cogentcore_org-core-shell-cosh.go +++ /dev/null @@ -1,21 +0,0 @@ -// Code generated by 'yaegi extract cogentcore.org/core/shell/cosh'. DO NOT EDIT. - -package interpreter - -import ( - "cogentcore.org/core/shell/cosh" - "reflect" -) - -func init() { - Symbols["cogentcore.org/core/shell/cosh/cosh"] = map[string]reflect.Value{ - // function, constant and variable definitions - "AllFiles": reflect.ValueOf(cosh.AllFiles), - "FileExists": reflect.ValueOf(cosh.FileExists), - "ReadFile": reflect.ValueOf(cosh.ReadFile), - "ReplaceInFile": reflect.ValueOf(cosh.ReplaceInFile), - "SplitLines": reflect.ValueOf(cosh.SplitLines), - "StringsToAnys": reflect.ValueOf(cosh.StringsToAnys), - "WriteFile": reflect.ValueOf(cosh.WriteFile), - } -} diff --git a/shell/interpreter/imports.go b/shell/interpreter/imports.go deleted file mode 100644 index 27c79398fd..0000000000 --- a/shell/interpreter/imports.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package interpreter - -//go:generate ./make - -import ( - "reflect" - - "github.com/cogentcore/yaegi/interp" -) - -var Symbols = map[string]map[string]reflect.Value{} - -// ImportShell imports special symbols from the shell package. -func (in *Interpreter) ImportShell() { - in.Interp.Use(interp.Exports{ - "cogentcore.org/core/shell/shell": map[string]reflect.Value{ - "Run": reflect.ValueOf(in.Shell.Run), - "RunErrOK": reflect.ValueOf(in.Shell.RunErrOK), - "Output": reflect.ValueOf(in.Shell.Output), - "OutputErrOK": reflect.ValueOf(in.Shell.OutputErrOK), - "Start": reflect.ValueOf(in.Shell.Start), - "AddCommand": reflect.ValueOf(in.Shell.AddCommand), - "RunCommands": reflect.ValueOf(in.Shell.RunCommands), - }, - }) -} diff --git a/shell/interpreter/interpreter.go b/shell/interpreter/interpreter.go deleted file mode 100644 index a70c5079f5..0000000000 --- a/shell/interpreter/interpreter.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package interpreter - -import ( - "context" - "fmt" - "io" - "log" - "os" - "os/signal" - "reflect" - "strconv" - "strings" - "syscall" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/shell" - "github.com/cogentcore/yaegi/interp" - "github.com/cogentcore/yaegi/stdlib" - "github.com/ergochat/readline" -) - -// Interpreter represents one running shell context -type Interpreter struct { - // the cosh shell - Shell *shell.Shell - - // HistFile is the name of the history file to open / save. - // Defaults to ~/.cosh-history for the default cosh shell. - // Update this prior to running Config() to take effect. - HistFile string - - // the yaegi interpreter - Interp *interp.Interpreter -} - -func init() { - delete(stdlib.Symbols, "errors/errors") // use our errors package instead -} - -// NewInterpreter returns a new [Interpreter] initialized with the given options. -// It automatically imports the standard library and configures necessary shell -// functions. End user app must call [Interp.Config] after importing any additional -// symbols, prior to running the interpreter. -func NewInterpreter(options interp.Options) *Interpreter { - in := &Interpreter{HistFile: "~/.cosh-history"} - in.Shell = shell.NewShell() - if options.Stdin != nil { - in.Shell.Config.StdIO.In = options.Stdin - } - if options.Stdout != nil { - in.Shell.Config.StdIO.Out = options.Stdout - } - if options.Stderr != nil { - in.Shell.Config.StdIO.Err = options.Stderr - } - in.Shell.SaveOrigStdIO() - options.Stdout = in.Shell.StdIOWrappers.Out - options.Stderr = in.Shell.StdIOWrappers.Err - options.Stdin = in.Shell.StdIOWrappers.In - in.Interp = interp.New(options) - errors.Log(in.Interp.Use(stdlib.Symbols)) - errors.Log(in.Interp.Use(Symbols)) - in.ImportShell() - go in.MonitorSignals() - return in -} - -// Prompt returns the appropriate REPL prompt to show the user. -func (in *Interpreter) Prompt() string { - dp := in.Shell.TotalDepth() - if dp == 0 { - return in.Shell.HostAndDir() + " > " - } - res := "> " - for range dp { - res += " " // note: /t confuses readline - } - return res -} - -// Eval evaluates (interprets) the given code, -// returning the value returned from the interpreter. -// HasPrint indicates whether the last line of code -// has the string print in it, which is for determining -// whether to print the result in interactive mode. -// It automatically logs any error in addition to returning it. -func (in *Interpreter) Eval(code string) (v reflect.Value, hasPrint bool, err error) { - in.Shell.TranspileCode(code) - source := false - if in.Shell.SSHActive == "" { - source = strings.HasPrefix(code, "source") - } - if in.Shell.TotalDepth() == 0 { - nl := len(in.Shell.Lines) - if nl > 0 { - ln := in.Shell.Lines[nl-1] - if strings.Contains(strings.ToLower(ln), "print") { - hasPrint = true - } - } - v, err = in.RunCode() - in.Shell.Errors = nil - } - if source { - v, err = in.RunCode() // run accumulated code - } - return -} - -// RunCode runs the accumulated set of code lines -// and clears the stack of code lines. -// It automatically logs any error in addition to returning it. -func (in *Interpreter) RunCode() (reflect.Value, error) { - if len(in.Shell.Errors) > 0 { - return reflect.Value{}, errors.Join(in.Shell.Errors...) 
- } - in.Shell.AddChunk() - code := in.Shell.Chunks - in.Shell.ResetCode() - var v reflect.Value - var err error - for _, ch := range code { - ctx := in.Shell.StartContext() - v, err = in.Interp.EvalWithContext(ctx, ch) - in.Shell.EndContext() - if err != nil { - cancelled := errors.Is(err, context.Canceled) - // fmt.Println("cancelled:", cancelled) - in.Shell.RestoreOrigStdIO() - in.Shell.ResetDepth() - if !cancelled { - in.Shell.AddError(err) - } else { - in.Shell.Errors = nil - } - break - } - } - return v, err -} - -// RunConfig runs the .cosh startup config file in the user's -// home directory if it exists. -func (in *Interpreter) RunConfig() error { - err := in.Shell.TranspileConfig() - if err != nil { - errors.Log(err) - } - _, err = in.RunCode() - return err -} - -// MonitorSignals monitors the operating system signals to appropriately -// stop the interpreter and prevent the shell from closing on Control+C. -// It is called automatically in another goroutine in [NewInterpreter]. -func (in *Interpreter) MonitorSignals() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - for { - <-c - in.Shell.CancelExecution() - } -} - -// Config performs final configuration after all the imports have been Use'd -func (in *Interpreter) Config() { - in.Interp.ImportUsed() - in.RunConfig() -} - -// OpenHistory opens history from the current HistFile -// and loads it into the readline history for given rl instance -func (in *Interpreter) OpenHistory(rl *readline.Instance) error { - err := in.Shell.OpenHistory(in.HistFile) - if err == nil { - for _, h := range in.Shell.Hist { - rl.SaveToHistory(h) - } - } - return err -} - -// SaveHistory saves last 500 (or HISTFILESIZE env value) lines of history, -// to the current HistFile. -func (in *Interpreter) SaveHistory() error { - n := 500 - if hfs := os.Getenv("HISTFILESIZE"); hfs != "" { - en, err := strconv.Atoi(hfs) - if err != nil { - in.Shell.Config.StdIO.ErrPrintf("SaveHistory: environment variable HISTFILESIZE: %q not a number: %s", hfs, err.Error()) - } else { - n = en - } - } - return in.Shell.SaveHistory(n, in.HistFile) -} - -// Interactive runs an interactive shell that allows the user to input cosh. -// Must have done in.Config() prior to calling. -func (in *Interpreter) Interactive() error { - rl, err := readline.NewFromConfig(&readline.Config{ - AutoComplete: &shell.ReadlineCompleter{Shell: in.Shell}, - Undo: true, - }) - if err != nil { - return err - } - in.OpenHistory(rl) - defer rl.Close() - log.SetOutput(rl.Stderr()) // redraw the prompt correctly after log output - - for { - rl.SetPrompt(in.Prompt()) - line, err := rl.ReadLine() - if errors.Is(err, readline.ErrInterrupt) { - continue - } - if errors.Is(err, io.EOF) { - in.SaveHistory() - os.Exit(0) - } - if err != nil { - in.SaveHistory() - return err - } - if len(line) > 0 && line[0] == '!' 
{ // history command - hl, err := strconv.Atoi(line[1:]) - nh := len(in.Shell.Hist) - if err != nil { - in.Shell.Config.StdIO.ErrPrintf("history number: %q not a number: %s", line[1:], err.Error()) - line = "" - } else if hl >= nh { - in.Shell.Config.StdIO.ErrPrintf("history number: %d not in range: [0:%d]", hl, nh) - line = "" - } else { - line = in.Shell.Hist[hl] - fmt.Printf("h:%d\t%s\n", hl, line) - } - } else if line != "" && !strings.HasPrefix(line, "history") && line != "h" { - in.Shell.AddHistory(line) - } - in.Shell.Errors = nil - v, hasPrint, err := in.Eval(line) - if err == nil && !hasPrint && v.IsValid() && !v.IsZero() && v.Kind() != reflect.Func { - fmt.Println(v.Interface()) - } - } -} diff --git a/shell/interpreter/make b/shell/interpreter/make deleted file mode 100755 index da045d6d28..0000000000 --- a/shell/interpreter/make +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env cosh -// add standard imports here; mostly base - -command base { - println("extracting base packages") - yaegi extract cogentcore.org/core/base/fsx cogentcore.org/core/base/errors cogentcore.org/core/base/strcase cogentcore.org/core/base/elide cogentcore.org/core/base/stringsx cogentcore.org/core/base/datasize -} - -command cosh { - println("extracting cosh packages") - yaegi extract cogentcore.org/core/shell/cosh -} - -// shell.RunCommands(args) -base -cosh - diff --git a/shell/paths.go b/shell/paths.go deleted file mode 100644 index 3dc2029484..0000000000 --- a/shell/paths.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package shell - -import ( - "go/token" -) - -// ReplaceIdentAt replaces an identifier spanning n tokens -// starting at given index, with a single identifier with given string. -// This is used in Exec mode for dealing with identifiers and paths that are -// separately-parsed by Go. -func (tk Tokens) ReplaceIdentAt(at int, str string, n int) Tokens { - ntk := append(tk[:at], &Token{Tok: token.IDENT, Str: str}) - ntk = append(ntk, tk[at+n:]...) - return ntk -} - -// Path extracts a standard path or URL expression from the current -// list of tokens (starting at index 0), returning the path string -// and the number of tokens included in the path. -// Restricts processing to contiguous elements with no spaces! 
-// If it is not a path, returns nil string, 0 -func (tk Tokens) Path(idx0 bool) (string, int) { - n := len(tk) - if n == 0 { - return "", 0 - } - t0 := tk[0] - ispath := (t0.IsPathDelim() || t0.Tok == token.TILDE) - if n == 1 { - if ispath { - return t0.String(), 1 - } - return "", 0 - } - str := tk[0].String() - lastEnd := int(tk[0].Pos) + len(str) - ci := 1 - if !ispath { - lastEnd = int(tk[0].Pos) - ci = 0 - if t0.Tok != token.IDENT { - return "", 0 - } - tin := 1 - tid := t0.Str - tindelim := tk[tin].IsPathDelim() - if idx0 { - tindelim = tk[tin].Tok == token.QUO - } - if (int(tk[tin].Pos) > lastEnd+len(tid)) || !(tk[tin].Tok == token.COLON || tindelim) { - return "", 0 - } - ci += tin + 1 - str = tid + tk[tin].String() - lastEnd += len(str) - } - prevWasDelim := true - for { - if ci >= n || int(tk[ci].Pos) > lastEnd { - return str, ci - } - ct := tk[ci] - if ct.IsPathDelim() || ct.IsPathExtraDelim() { - prevWasDelim = true - str += ct.String() - lastEnd += len(ct.String()) - ci++ - continue - } - if ct.Tok == token.STRING { - prevWasDelim = true - str += EscapeQuotes(ct.String()) - lastEnd += len(ct.String()) - ci++ - continue - } - if !prevWasDelim { - if ct.Tok == token.ILLEGAL && ct.Str == `\` && ci+1 < n && int(tk[ci+1].Pos) == lastEnd+2 { - prevWasDelim = true - str += " " - ci++ - lastEnd += 2 - continue - } - return str, ci - } - if ct.IsWord() { - prevWasDelim = false - str += ct.String() - lastEnd += len(ct.String()) - ci++ - continue - } - return str, ci - } -} - -func (tk *Token) IsPathDelim() bool { - return tk.Tok == token.PERIOD || tk.Tok == token.QUO -} - -func (tk *Token) IsPathExtraDelim() bool { - return tk.Tok == token.SUB || tk.Tok == token.ASSIGN || tk.Tok == token.REM || (tk.Tok == token.ILLEGAL && (tk.Str == "?" || tk.Str == "#")) -} - -// IsWord returns true if the token is some kind of word-like entity, -// including IDENT, STRING, CHAR, or one of the Go keywords. -// This is for exec filtering. -func (tk *Token) IsWord() bool { - return tk.Tok == token.IDENT || tk.IsGo() || tk.Tok == token.STRING || tk.Tok == token.CHAR -} diff --git a/shell/run.go b/shell/run.go deleted file mode 100644 index d6347a962f..0000000000 --- a/shell/run.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package shell - -// Run executes the given command string, waiting for the command to finish, -// handling the given arguments appropriately. -// If there is any error, it adds it to the shell, and triggers CancelExecution. -// It forwards output to [exec.Config.Stdout] and [exec.Config.Stderr] appropriately. -func (sh *Shell) Run(cmd any, args ...any) { - sh.Exec(false, false, false, cmd, args...) -} - -// RunErrOK executes the given command string, waiting for the command to finish, -// handling the given arguments appropriately. -// It does not stop execution if there is an error. -// If there is any error, it adds it to the shell. It forwards output to -// [exec.Config.Stdout] and [exec.Config.Stderr] appropriately. -func (sh *Shell) RunErrOK(cmd any, args ...any) { - sh.Exec(true, false, false, cmd, args...) -} - -// Start starts the given command string for running in the background, -// handling the given arguments appropriately. -// If there is any error, it adds it to the shell. It forwards output to -// [exec.Config.Stdout] and [exec.Config.Stderr] appropriately. 
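For review context on the Run/RunErrOK wrappers being removed above: a minimal sketch of how they were typically called, assuming the Shell type and NewShell constructor from shell.go (deleted further below in this diff); the specific commands are illustrative only.

package main

import "cogentcore.org/core/shell"

func main() {
	sh := shell.NewShell()
	defer sh.Close()
	// Run adds any error to the shell and cancels further execution.
	sh.Run("git", "status")
	// RunErrOK records the error but does not stop execution.
	sh.RunErrOK("mkdir", "build")
}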
-func (sh *Shell) Start(cmd any, args ...any) { - sh.Exec(false, true, false, cmd, args...) -} - -// Output executes the given command string, handling the given arguments -// appropriately. If there is any error, it adds it to the shell. It returns -// the stdout as a string and forwards stderr to [exec.Config.Stderr] appropriately. -func (sh *Shell) Output(cmd any, args ...any) string { - return sh.Exec(false, false, true, cmd, args...) -} - -// OutputErrOK executes the given command string, handling the given arguments -// appropriately. If there is any error, it adds it to the shell. It returns -// the stdout as a string and forwards stderr to [exec.Config.Stderr] appropriately. -func (sh *Shell) OutputErrOK(cmd any, args ...any) string { - return sh.Exec(true, false, true, cmd, args...) -} diff --git a/shell/shell.go b/shell/shell.go deleted file mode 100644 index 655262a17e..0000000000 --- a/shell/shell.go +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package shell provides the Cogent Shell (cosh), which combines the best parts -// of Go and bash to provide an integrated shell experience that allows you to -// easily run terminal commands while using Go for complicated logic. -package shell - -import ( - "context" - "fmt" - "io/fs" - "log/slog" - "os" - "path/filepath" - "slices" - "strconv" - "strings" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/base/exec" - "cogentcore.org/core/base/logx" - "cogentcore.org/core/base/num" - "cogentcore.org/core/base/reflectx" - "cogentcore.org/core/base/sshclient" - "cogentcore.org/core/base/stack" - "cogentcore.org/core/base/stringsx" - "github.com/mitchellh/go-homedir" - "golang.org/x/tools/imports" -) - -// Shell represents one running shell context. -type Shell struct { - - // Config is the [exec.Config] used to run commands. - Config exec.Config - - // StdIOWrappers are IO wrappers sent to the interpreter, so we can - // control the IO streams used within the interpreter. - // Call SetWrappers on this with another StdIO object to update settings. - StdIOWrappers exec.StdIO - - // ssh connection, configuration - SSH *sshclient.Config - - // collection of ssh clients - SSHClients map[string]*sshclient.Client - - // SSHActive is the name of the active SSH client - SSHActive string - - // depth of delim at the end of the current line. if 0, was complete. - ParenDepth, BraceDepth, BrackDepth, TypeDepth, DeclDepth int - - // Chunks of code lines that are accumulated during Transpile, - // each of which should be evaluated separately, to avoid - // issues with contextual effects from import, package etc. - Chunks []string - - // current stack of transpiled lines, that are accumulated into - // code Chunks - Lines []string - - // stack of runtime errors - Errors []error - - // Builtins are all the builtin shell commands - Builtins map[string]func(cmdIO *exec.CmdIO, args ...string) error - - // commands that have been defined, which can be run in Exec mode. - Commands map[string]func(args ...string) - - // Jobs is a stack of commands running in the background - // (via Start instead of Run) - Jobs stack.Stack[*exec.CmdIO] - - // Cancel, while the interpreter is running, can be called - // to stop the code interpreting. - // It is connected to the Ctx context, by StartContext() - // Both can be nil. 
- Cancel func() - - // Ctx is the context used for cancelling current shell running - // a single chunk of code, typically from the interpreter. - // We are not able to pass the context around so it is set here, - // in the StartContext function. Clear when done with ClearContext. - Ctx context.Context - - // original standard IO setings, to restore - OrigStdIO exec.StdIO - - // Hist is the accumulated list of command-line input, - // which is displayed with the history builtin command, - // and saved / restored from ~/.coshhist file - Hist []string - - // FuncToVar translates function definitions into variable definitions, - // which is the default for interactive use of random code fragments - // without the complete go formatting. - // For pure transpiling of a complete codebase with full proper Go formatting - // this should be turned off. - FuncToVar bool - - // commandArgs is a stack of args passed to a command, used for simplified - // processing of args expressions. - commandArgs stack.Stack[[]string] - - // isCommand is a stack of bools indicating whether the _immediate_ run context - // is a command, which affects the way that args are processed. - isCommand stack.Stack[bool] - - // if this is non-empty, it is the name of the last command defined. - // triggers insertion of the AddCommand call to add to list of defined commands. - lastCommand string -} - -// NewShell returns a new [Shell] with default options. -func NewShell() *Shell { - sh := &Shell{ - Config: exec.Config{ - Dir: errors.Log1(os.Getwd()), - Env: map[string]string{}, - Buffer: false, - }, - } - sh.FuncToVar = true - sh.Config.StdIO.SetFromOS() - sh.SSH = sshclient.NewConfig(&sh.Config) - sh.SSHClients = make(map[string]*sshclient.Client) - sh.Commands = make(map[string]func(args ...string)) - sh.InstallBuiltins() - return sh -} - -// StartContext starts a processing context, -// setting the Ctx and Cancel Fields. -// Call EndContext when current operation finishes. -func (sh *Shell) StartContext() context.Context { - sh.Ctx, sh.Cancel = context.WithCancel(context.Background()) - return sh.Ctx -} - -// EndContext ends a processing context, clearing the -// Ctx and Cancel fields. -func (sh *Shell) EndContext() { - sh.Ctx = nil - sh.Cancel = nil -} - -// SaveOrigStdIO saves the current Config.StdIO as the original to revert to -// after an error, and sets the StdIOWrappers to use them. -func (sh *Shell) SaveOrigStdIO() { - sh.OrigStdIO = sh.Config.StdIO - sh.StdIOWrappers.NewWrappers(&sh.OrigStdIO) -} - -// RestoreOrigStdIO reverts to using the saved OrigStdIO -func (sh *Shell) RestoreOrigStdIO() { - sh.Config.StdIO = sh.OrigStdIO - sh.OrigStdIO.SetToOS() - sh.StdIOWrappers.SetWrappers(&sh.OrigStdIO) -} - -// Close closes any resources associated with the shell, -// including terminating any commands that are not running "nohup" -// in the background. -func (sh *Shell) Close() { - sh.CloseSSH() - // todo: kill jobs etc -} - -// CloseSSH closes all open ssh client connections -func (sh *Shell) CloseSSH() { - sh.SSHActive = "" - for _, cl := range sh.SSHClients { - cl.Close() - } - sh.SSHClients = make(map[string]*sshclient.Client) -} - -// ActiveSSH returns the active ssh client -func (sh *Shell) ActiveSSH() *sshclient.Client { - if sh.SSHActive == "" { - return nil - } - return sh.SSHClients[sh.SSHActive] -} - -// Host returns the name we're running commands on, -// which is empty if localhost (default). 
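A sketch of the cancellation pattern that the Ctx/Cancel fields and StartContext/EndContext support, as the interpreter above relies on it. runChunk is a hypothetical helper, and the comments assume that CancelExecution (the method called by AddError and the signal monitor) invokes sh.Cancel.

package main

import "cogentcore.org/core/shell"

// runChunk is a hypothetical wrapper: it runs one unit of work under a
// cancellable context, so that sh.CancelExecution() (e.g. on Ctrl+C)
// can interrupt it.
func runChunk(sh *shell.Shell, work func()) {
	ctx := sh.StartContext() // sets sh.Ctx and sh.Cancel
	defer sh.EndContext()    // clears them when done
	done := make(chan struct{})
	go func() { work(); close(done) }()
	select {
	case <-ctx.Done(): // assumed: CancelExecution fired sh.Cancel
	case <-done:
	}
}

func main() {
	sh := shell.NewShell()
	runChunk(sh, func() { sh.Run("sleep", "5") })
}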
-func (sh *Shell) Host() string { - cl := sh.ActiveSSH() - if cl == nil { - return "" - } - return "@" + sh.SSHActive + ":" + cl.Host -} - -// HostAndDir returns the name we're running commands on, -// which is empty if localhost (default), -// and the current directory on that host. -func (sh *Shell) HostAndDir() string { - host := "" - dir := sh.Config.Dir - home := errors.Log1(homedir.Dir()) - cl := sh.ActiveSSH() - if cl != nil { - host = "@" + sh.SSHActive + ":" + cl.Host + ":" - dir = cl.Dir - home = cl.HomeDir - } - rel := errors.Log1(filepath.Rel(home, dir)) - // if it has to go back, then it is not in home dir, so no ~ - if strings.Contains(rel, "..") { - return host + dir + string(filepath.Separator) - } - return host + filepath.Join("~", rel) + string(filepath.Separator) -} - -// SSHByHost returns the SSH client for given host name, with err if not found -func (sh *Shell) SSHByHost(host string) (*sshclient.Client, error) { - if scl, ok := sh.SSHClients[host]; ok { - return scl, nil - } - return nil, fmt.Errorf("ssh connection named: %q not found", host) -} - -// TotalDepth returns the sum of any unresolved paren, brace, or bracket depths. -func (sh *Shell) TotalDepth() int { - return num.Abs(sh.ParenDepth) + num.Abs(sh.BraceDepth) + num.Abs(sh.BrackDepth) -} - -// ResetCode resets the stack of transpiled code -func (sh *Shell) ResetCode() { - sh.Chunks = nil - sh.Lines = nil -} - -// ResetDepth resets the current depths to 0 -func (sh *Shell) ResetDepth() { - sh.ParenDepth, sh.BraceDepth, sh.BrackDepth, sh.TypeDepth, sh.DeclDepth = 0, 0, 0, 0, 0 -} - -// DepthError reports an error if any of the parsing depths are not zero, -// to be called at the end of transpiling a complete block of code. -func (sh *Shell) DepthError() error { - if sh.TotalDepth() == 0 { - return nil - } - str := "" - if sh.ParenDepth != 0 { - str += fmt.Sprintf("Incomplete parentheses (), remaining depth: %d\n", sh.ParenDepth) - } - if sh.BraceDepth != 0 { - str += fmt.Sprintf("Incomplete braces [], remaining depth: %d\n", sh.BraceDepth) - } - if sh.BrackDepth != 0 { - str += fmt.Sprintf("Incomplete brackets {}, remaining depth: %d\n", sh.BrackDepth) - } - if str != "" { - slog.Error(str) - return errors.New(str) - } - return nil -} - -// AddLine adds line on the stack -func (sh *Shell) AddLine(ln string) { - sh.Lines = append(sh.Lines, ln) -} - -// Code returns the current transpiled lines, -// split into chunks that should be compiled separately. -func (sh *Shell) Code() string { - sh.AddChunk() - if len(sh.Chunks) == 0 { - return "" - } - return strings.Join(sh.Chunks, "\n") -} - -// AddChunk adds current lines into a chunk of code -// that should be compiled separately. 
-func (sh *Shell) AddChunk() { - if len(sh.Lines) == 0 { - return - } - sh.Chunks = append(sh.Chunks, strings.Join(sh.Lines, "\n")) - sh.Lines = nil -} - -// TranspileCode processes each line of given code, -// adding the results to the LineStack -func (sh *Shell) TranspileCode(code string) { - lns := strings.Split(code, "\n") - n := len(lns) - if n == 0 { - return - } - for _, ln := range lns { - hasDecl := sh.DeclDepth > 0 - tl := sh.TranspileLine(ln) - sh.AddLine(tl) - if sh.BraceDepth == 0 && sh.BrackDepth == 0 && sh.ParenDepth == 1 && sh.lastCommand != "" { - sh.lastCommand = "" - nl := len(sh.Lines) - sh.Lines[nl-1] = sh.Lines[nl-1] + ")" - sh.ParenDepth-- - } - if hasDecl && sh.DeclDepth == 0 { // break at decl - sh.AddChunk() - } - } -} - -// TranspileCodeFromFile transpiles the code in given file -func (sh *Shell) TranspileCodeFromFile(file string) error { - b, err := os.ReadFile(file) - if err != nil { - return err - } - sh.TranspileCode(string(b)) - return nil -} - -// TranspileFile transpiles the given input cosh file to the -// given output Go file. If no existing package declaration -// is found, then package main and func main declarations are -// added. This also affects how functions are interpreted. -func (sh *Shell) TranspileFile(in string, out string) error { - b, err := os.ReadFile(in) - if err != nil { - return err - } - code := string(b) - lns := stringsx.SplitLines(code) - hasPackage := false - for _, ln := range lns { - if strings.HasPrefix(ln, "package ") { - hasPackage = true - break - } - } - if hasPackage { - sh.FuncToVar = false // use raw functions - } - sh.TranspileCode(code) - sh.FuncToVar = true - if err != nil { - return err - } - gen := "// Code generated by \"cosh build\"; DO NOT EDIT.\n\n" - if hasPackage { - sh.Lines = slices.Insert(sh.Lines, 0, gen) - } else { - sh.Lines = slices.Insert(sh.Lines, 0, gen, "package main", "", "func main() {", "shell := shell.NewShell()") - sh.Lines = append(sh.Lines, "}") - } - src := []byte(sh.Code()) - res, err := imports.Process(out, src, nil) - if err != nil { - res = src - slog.Error(err.Error()) - } else { - err = sh.DepthError() - } - werr := os.WriteFile(out, res, 0666) - return errors.Join(err, werr) -} - -// AddError adds the given error to the error stack if it is non-nil, -// and calls the Cancel function if set, to stop execution. -// This is the main way that shell errors are handled. -// It also prints the error. -func (sh *Shell) AddError(err error) error { - if err == nil { - return nil - } - sh.Errors = append(sh.Errors, err) - logx.PrintlnError(err) - sh.CancelExecution() - return err -} - -// TranspileConfig transpiles the .cosh startup config file in the user's -// home directory if it exists. -func (sh *Shell) TranspileConfig() error { - path, err := homedir.Expand("~/.cosh") - if err != nil { - return err - } - b, err := os.ReadFile(path) - if err != nil { - if errors.Is(err, fs.ErrNotExist) { - return nil - } - return err - } - sh.TranspileCode(string(b)) - return nil -} - -// AddHistory adds given line to the Hist record of commands -func (sh *Shell) AddHistory(line string) { - sh.Hist = append(sh.Hist, line) -} - -// SaveHistory saves up to the given number of lines of current history -// to given file, e.g., ~/.coshhist for the default cosh program. -// If n is <= 0 all lines are saved. n is typically 500 by default. 
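To make the TranspileCode/Code workflow above concrete, a small sketch assuming the pre-removal shell API; the generated text shown in comments is paraphrased from the transpile tests later in this diff.

package main

import (
	"fmt"

	"cogentcore.org/core/shell"
)

func main() {
	sh := shell.NewShell()
	// Each input line is transpiled and accumulated on the Lines stack.
	sh.TranspileCode("mkdir -p build\ncd build")
	// Code gathers the accumulated lines into chunks and joins them.
	fmt.Println(sh.Code())
	// roughly:
	//   shell.Run("mkdir", "-p", "build")
	//   shell.Run("cd", "build")
}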
-func (sh *Shell) SaveHistory(n int, file string) error { - path, err := homedir.Expand(file) - if err != nil { - return err - } - hn := len(sh.Hist) - sn := hn - if n > 0 { - sn = min(n, hn) - } - lh := strings.Join(sh.Hist[hn-sn:hn], "\n") - err = os.WriteFile(path, []byte(lh), 0666) - if err != nil { - return err - } - return nil -} - -// OpenHistory opens Hist history lines from given file, -// e.g., ~/.coshhist -func (sh *Shell) OpenHistory(file string) error { - path, err := homedir.Expand(file) - if err != nil { - return err - } - b, err := os.ReadFile(path) - if err != nil { - return err - } - sh.Hist = strings.Split(string(b), "\n") - return nil -} - -// AddCommand adds given command to list of available commands -func (sh *Shell) AddCommand(name string, cmd func(args ...string)) { - sh.Commands[name] = cmd -} - -// RunCommands runs the given command(s). This is typically called -// from a Makefile-style cosh script. -func (sh *Shell) RunCommands(cmds []any) error { - for _, cmd := range cmds { - if cmdFun, hasCmd := sh.Commands[reflectx.ToString(cmd)]; hasCmd { - cmdFun() - } else { - return errors.Log(fmt.Errorf("command %q not found", cmd)) - } - } - return nil -} - -// DeleteJob deletes the given job and returns true if successful, -func (sh *Shell) DeleteJob(cmdIO *exec.CmdIO) bool { - idx := slices.Index(sh.Jobs, cmdIO) - if idx >= 0 { - sh.Jobs = slices.Delete(sh.Jobs, idx, idx+1) - return true - } - return false -} - -// JobIDExpand expands %n job id values in args with the full PID -// returns number of PIDs expanded -func (sh *Shell) JobIDExpand(args []string) int { - exp := 0 - for i, id := range args { - if id[0] == '%' { - idx, err := strconv.Atoi(id[1:]) - if err == nil { - if idx > 0 && idx <= len(sh.Jobs) { - jb := sh.Jobs[idx-1] - if jb.Cmd != nil && jb.Cmd.Process != nil { - args[i] = fmt.Sprintf("%d", jb.Cmd.Process.Pid) - exp++ - } - } else { - sh.AddError(fmt.Errorf("cosh: job number out of range: %d", idx)) - } - } - } - } - return exp -} diff --git a/shell/token.go b/shell/token.go deleted file mode 100644 index a84c01a790..0000000000 --- a/shell/token.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package shell - -import ( - "go/scanner" - "go/token" - "log/slog" - "slices" - "strings" - - "cogentcore.org/core/base/logx" -) - -// Token provides full data for one token -type Token struct { - // Go token classification - Tok token.Token - - // Literal string - Str string - - // position in the original string. - // this is only set for the original parse, - // not for transpiled additions. - Pos token.Pos -} - -// Tokens is a slice of Token -type Tokens []*Token - -// NewToken returns a new token, for generated tokens without Pos -func NewToken(tok token.Token, str ...string) *Token { - tk := &Token{Tok: tok} - if len(str) > 0 { - tk.Str = str[0] - } - return tk -} - -// Add adds a new token, for generated tokens without Pos -func (tk *Tokens) Add(tok token.Token, str ...string) *Token { - nt := NewToken(tok, str...) - *tk = append(*tk, nt) - return nt -} - -// AddTokens adds given tokens to our list -func (tk *Tokens) AddTokens(toks Tokens) *Tokens { - *tk = append(*tk, toks...) - return tk -} - -// Insert inserts a new token at given position -func (tk *Tokens) Insert(i int, tok token.Token, str ...string) *Token { - nt := NewToken(tok, str...) 
- *tk = slices.Insert(*tk, i, nt) - return nt -} - -// Last returns the final token in the list -func (tk Tokens) Last() *Token { - n := len(tk) - if n == 0 { - return nil - } - return tk[n-1] -} - -// DeleteLastComma removes any final Comma. -// easier to generate and delete at the end -func (tk *Tokens) DeleteLastComma() { - lt := tk.Last() - if lt == nil { - return - } - if lt.Tok == token.COMMA { - *tk = (*tk)[:len(*tk)-1] - } -} - -// String returns the string for the token -func (tk *Token) String() string { - if tk.Str != "" { - return tk.Str - } - return tk.Tok.String() -} - -// IsBacktickString returns true if the given STRING uses backticks -func (tk *Token) IsBacktickString() bool { - if tk.Tok != token.STRING { - return false - } - return (tk.Str[0] == '`') -} - -// IsGo returns true if the given token is a Go Keyword or Comment -func (tk *Token) IsGo() bool { - if tk.Tok >= token.BREAK && tk.Tok <= token.VAR { - return true - } - if tk.Tok == token.COMMENT { - return true - } - return false -} - -// IsValidExecIdent returns true if the given token is a valid component -// of an Exec mode identifier -func (tk *Token) IsValidExecIdent() bool { - return (tk.IsGo() || tk.Tok == token.IDENT || tk.Tok == token.SUB || tk.Tok == token.DEC || tk.Tok == token.INT || tk.Tok == token.FLOAT || tk.Tok == token.ASSIGN) -} - -// String is the stringer version which includes the token ID -// in addition to the string literal -func (tk Tokens) String() string { - str := "" - for _, tok := range tk { - str += "[" + tok.Tok.String() + "] " - if tok.Str != "" { - str += tok.Str + " " - } - } - if len(str) == 0 { - return str - } - return str[:len(str)-1] // remove trailing space -} - -// Code returns concatenated Str values of the tokens, -// to generate a surface-valid code string. 
-func (tk Tokens) Code() string { - n := len(tk) - if n == 0 { - return "" - } - str := "" - prvIdent := false - for _, tok := range tk { - switch { - case tok.IsOp(): - if tok.Tok == token.INC || tok.Tok == token.DEC { - str += tok.String() + " " - } else if tok.Tok == token.MUL { - str += " " + tok.String() - } else { - str += " " + tok.String() + " " - } - prvIdent = false - case tok.Tok == token.ELLIPSIS: - str += " " + tok.String() - prvIdent = false - case tok.IsBracket() || tok.Tok == token.PERIOD: - if tok.Tok == token.RBRACE || tok.Tok == token.LBRACE { - if len(str) > 0 && str[len(str)-1] != ' ' { - str += " " - } - str += tok.String() + " " - } else { - str += tok.String() - } - prvIdent = false - case tok.Tok == token.COMMA || tok.Tok == token.COLON || tok.Tok == token.SEMICOLON: - str += tok.String() + " " - prvIdent = false - case tok.Tok == token.STRUCT: - str += " " + tok.String() + " " - case tok.Tok == token.FUNC: - if prvIdent { - str += " " - } - str += tok.String() - prvIdent = true - case tok.IsGo(): - if prvIdent { - str += " " - } - str += tok.String() - if tok.Tok != token.MAP { - str += " " - } - prvIdent = false - case tok.Tok == token.IDENT || tok.Tok == token.STRING: - if prvIdent { - str += " " - } - str += tok.String() - prvIdent = true - default: - str += tok.String() - prvIdent = false - } - } - if len(str) == 0 { - return str - } - if str[len(str)-1] == ' ' { - return str[:len(str)-1] - } - return str -} - -// IsOp returns true if the given token is an operator -func (tk *Token) IsOp() bool { - if tk.Tok >= token.ADD && tk.Tok <= token.DEFINE { - return true - } - return false -} - -// Contains returns true if the token string contains any of the given token(s) -func (tk Tokens) Contains(toks ...token.Token) bool { - if len(toks) == 0 { - slog.Error("programmer error: tokens.Contains with no args") - return false - } - for _, t := range tk { - for _, st := range toks { - if t.Tok == st { - return true - } - } - } - return false -} - -// EscapeQuotes replaces any " with \" -func EscapeQuotes(str string) string { - return strings.ReplaceAll(str, `"`, `\"`) -} - -// AddQuotes surrounds given string with quotes, -// also escaping any contained quotes -func AddQuotes(str string) string { - return `"` + EscapeQuotes(str) + `"` -} - -// IsBracket returns true if the given token is a bracket delimiter: -// paren, brace, bracket -func (tk *Token) IsBracket() bool { - if (tk.Tok >= token.LPAREN && tk.Tok <= token.LBRACE) || (tk.Tok >= token.RPAREN && tk.Tok <= token.RBRACE) { - return true - } - return false -} - -// RightMatching returns the position (or -1 if not found) for the -// right matching [paren, bracket, brace] given the left one that -// is at the 0 position of the current set of tokens. -func (tk Tokens) RightMatching() int { - n := len(tk) - if n == 0 { - return -1 - } - rb := token.RPAREN - lb := tk[0].Tok - switch lb { - case token.LPAREN: - rb = token.RPAREN - case token.LBRACK: - rb = token.RBRACK - case token.LBRACE: - rb = token.RBRACE - } - depth := 0 - for i := 1; i < n; i++ { - tok := tk[i].Tok - switch tok { - case rb: - if depth <= 0 { - return i - } - depth-- - case lb: - depth++ - } - } - return -1 -} - -// BracketDepths returns the depths for the three bracket delimiters -// [paren, bracket, brace], based on unmatched right versions. 
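A short sketch of how these token helpers fit together (Tokens, String, Code, and BracketDepths just below), assuming the pre-removal shell API; the printed depths are what BracketDepths should report for the one unclosed brace.

package main

import (
	"fmt"

	"cogentcore.org/core/shell"
)

func main() {
	sh := shell.NewShell()
	toks := sh.Tokens("for i := 0; i < 3; i++ {")
	fmt.Println(toks.String()) // token IDs with literals, for debugging
	paren, brace, brack := toks.BracketDepths()
	fmt.Println(paren, brace, brack) // 0 1 0: one unmatched {
	fmt.Println(toks.Code())         // surface-valid Go reassembled from tokens
}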
-func (tk Tokens) BracketDepths() (paren, brace, brack int) { - n := len(tk) - if n == 0 { - return - } - for i := 0; i < n; i++ { - tok := tk[i].Tok - switch tok { - case token.LPAREN: - paren++ - case token.LBRACE: - brace++ - case token.LBRACK: - brack++ - case token.RPAREN: - paren-- - case token.RBRACE: - brace-- - case token.RBRACK: - brack-- - } - } - return -} - -// Tokens converts the string into tokens -func (sh *Shell) Tokens(ln string) Tokens { - fset := token.NewFileSet() - f := fset.AddFile("", fset.Base(), len(ln)) - var sc scanner.Scanner - sc.Init(f, []byte(ln), sh.errHandler, scanner.ScanComments|2) // 2 is non-exported dontInsertSemis - // note to Go team: just export this stuff. seriously. - - var toks Tokens - for { - pos, tok, lit := sc.Scan() - if tok == token.EOF { - break - } - // logx.PrintfDebug(" token: %s\t%s\t%q\n", fset.Position(pos), tok, lit) - toks = append(toks, &Token{Tok: tok, Pos: pos, Str: lit}) - } - return toks -} - -func (sh *Shell) errHandler(pos token.Position, msg string) { - logx.PrintlnDebug("Scan Error:", pos, msg) -} diff --git a/shell/transpile.go b/shell/transpile.go deleted file mode 100644 index 305e832255..0000000000 --- a/shell/transpile.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package shell - -import ( - "fmt" - "go/token" - "strings" - - "cogentcore.org/core/base/logx" -) - -// TranspileLine is the main function for parsing a single line of shell input, -// returning a new transpiled line of code that converts Exec code into corresponding -// Go function calls. -func (sh *Shell) TranspileLine(ln string) string { - if len(ln) == 0 { - return ln - } - if strings.HasPrefix(ln, "#!") { - return "" - } - toks := sh.TranspileLineTokens(ln) - paren, brace, brack := toks.BracketDepths() - sh.ParenDepth += paren - sh.BraceDepth += brace - sh.BrackDepth += brack - if sh.TypeDepth > 0 && sh.BraceDepth == 0 { - sh.TypeDepth = 0 - } - if sh.DeclDepth > 0 && sh.ParenDepth == 0 { - sh.DeclDepth = 0 - } - // logx.PrintlnDebug("depths: ", sh.ParenDepth, sh.BraceDepth, sh.BrackDepth) - return toks.Code() -} - -// TranspileLineTokens returns the tokens for the full line -func (sh *Shell) TranspileLineTokens(ln string) Tokens { - if ln == "" { - return nil - } - toks := sh.Tokens(ln) - n := len(toks) - if n == 0 { - return toks - } - ewords, err := ExecWords(ln) - if err != nil { - sh.AddError(err) - return nil - } - logx.PrintlnDebug("\n########## line:\n", ln, "\nTokens:\n", toks.String(), "\nWords:\n", ewords) - - if toks[0].Tok == token.TYPE { - sh.TypeDepth++ - } - if toks[0].Tok == token.IMPORT || toks[0].Tok == token.VAR || toks[0].Tok == token.CONST { - sh.DeclDepth++ - } - - if sh.TypeDepth > 0 || sh.DeclDepth > 0 { - logx.PrintlnDebug("go: type / decl defn") - return sh.TranspileGo(toks) - } - - t0 := toks[0] - _, t0pn := toks.Path(true) // true = first position - en := len(ewords) - - f0exec := (t0.Tok == token.IDENT && ExecWordIsCommand(ewords[0])) - - switch { - case t0.Tok == token.LBRACE: - logx.PrintlnDebug("go: { } line") - return sh.TranspileGo(toks[1 : n-1]) - case t0.Tok == token.LBRACK: - logx.PrintlnDebug("exec: [ ] line") - return sh.TranspileExec(ewords, false) // it processes the [ ] - case t0.Tok == token.ILLEGAL: - logx.PrintlnDebug("exec: illegal") - return sh.TranspileExec(ewords, false) - case t0.IsBacktickString(): - logx.PrintlnDebug("exec: backquoted string") - 
exe := sh.TranspileExecString(t0.Str, false) - if n > 1 { // todo: is this an error? - exe.AddTokens(sh.TranspileGo(toks[1:])) - } - return exe - case t0.Tok == token.IDENT && t0.Str == "command": - sh.lastCommand = toks[1].Str // 1 is the name -- triggers AddCommand - toks = toks[2:] // get rid of first - toks.Insert(0, token.IDENT, "shell.AddCommand") - toks.Insert(1, token.LPAREN) - toks.Insert(2, token.STRING, `"`+sh.lastCommand+`"`) - toks.Insert(3, token.COMMA) - toks.Insert(4, token.FUNC) - toks.Insert(5, token.LPAREN) - toks.Insert(6, token.IDENT, "args") - toks.Insert(7, token.ELLIPSIS) - toks.Insert(8, token.IDENT, "string") - toks.Insert(9, token.RPAREN) - toks.AddTokens(sh.TranspileGo(toks[11:])) - case t0.IsGo(): - if t0.Tok == token.GO { - if !toks.Contains(token.LPAREN) { - logx.PrintlnDebug("exec: go command") - return sh.TranspileExec(ewords, false) - } - } - logx.PrintlnDebug("go keyword") - return sh.TranspileGo(toks) - case toks[n-1].Tok == token.INC: - return sh.TranspileGo(toks) - case t0pn > 0: // path expr - logx.PrintlnDebug("exec: path...") - return sh.TranspileExec(ewords, false) - case t0.Tok == token.STRING: - logx.PrintlnDebug("exec: string...") - return sh.TranspileExec(ewords, false) - case f0exec && en == 1: - logx.PrintlnDebug("exec: 1 word") - return sh.TranspileExec(ewords, false) - case !f0exec: // exec must be IDENT - logx.PrintlnDebug("go: not ident") - return sh.TranspileGo(toks) - case f0exec && en > 1 && (ewords[1][0] == '=' || ewords[1][0] == ':' || ewords[1][0] == '+' || toks[1].Tok == token.COMMA): - logx.PrintlnDebug("go: assignment or defn") - return sh.TranspileGo(toks) - case f0exec: // now any ident - logx.PrintlnDebug("exec: ident..") - return sh.TranspileExec(ewords, false) - default: - logx.PrintlnDebug("go: default") - return sh.TranspileGo(toks) - } - return toks -} - -// TranspileGo returns transpiled tokens assuming Go code. -// Unpacks any backtick encapsulated shell commands. -func (sh *Shell) TranspileGo(toks Tokens) Tokens { - n := len(toks) - if n == 0 { - return toks - } - if sh.FuncToVar && toks[0].Tok == token.FUNC { // reorder as an assignment - if len(toks) > 1 && toks[1].Tok == token.IDENT { - toks[0] = toks[1] - toks.Insert(1, token.DEFINE) - toks[2] = &Token{Tok: token.FUNC} - } - } - gtoks := make(Tokens, 0, len(toks)) // return tokens - for _, tok := range toks { - if sh.TypeDepth == 0 && tok.IsBacktickString() { - gtoks = append(gtoks, sh.TranspileExecString(tok.Str, true)...) - } else { - gtoks = append(gtoks, tok) - } - } - return gtoks -} - -// TranspileExecString returns transpiled tokens assuming Exec code, -// from a backtick-encoded string, with the given bool indicating -// whether [Output] is needed. -func (sh *Shell) TranspileExecString(str string, output bool) Tokens { - if len(str) <= 1 { - return nil - } - ewords, err := ExecWords(str[1 : len(str)-1]) // enclosed string - if err != nil { - sh.AddError(err) - } - return sh.TranspileExec(ewords, output) -} - -// TranspileExec returns transpiled tokens assuming Exec code, -// with the given bools indicating the type of run to execute. 
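To illustrate what TranspileExec produces for pipelines and background jobs, a sketch assuming the pre-removal API; the expected strings in the comments are taken from the transpile tests further below in this diff.

package main

import (
	"fmt"

	"cogentcore.org/core/shell"
)

func main() {
	sh := shell.NewShell()
	// A pipe splits the line: the producer becomes Start (background),
	// the consumer becomes Run, per the tests below.
	fmt.Println(sh.TranspileLine(`cat file | grep -v exe > test.out`))
	// shell.Start("cat", "file", "|"); shell.Run("grep", "-v", "exe", ">", "test.out")

	// A trailing & also turns Run into Start.
	fmt.Println(sh.TranspileLine("go build &"))
	// shell.Start("go", "build")
}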
-func (sh *Shell) TranspileExec(ewords []string, output bool) Tokens { - n := len(ewords) - if n == 0 { - return nil - } - etoks := make(Tokens, 0, n+5) // return tokens - var execTok *Token - bgJob := false - noStop := false - if ewords[0] == "[" { - ewords = ewords[1:] - n-- - noStop = true - } - startExec := func() { - bgJob = false - etoks.Add(token.IDENT, "shell") - etoks.Add(token.PERIOD) - switch { - case output && noStop: - execTok = etoks.Add(token.IDENT, "OutputErrOK") - case output && !noStop: - execTok = etoks.Add(token.IDENT, "Output") - case !output && noStop: - execTok = etoks.Add(token.IDENT, "RunErrOK") - case !output && !noStop: - execTok = etoks.Add(token.IDENT, "Run") - } - etoks.Add(token.LPAREN) - } - endExec := func() { - if bgJob { - execTok.Str = "Start" - } - etoks.DeleteLastComma() - etoks.Add(token.RPAREN) - } - - startExec() - - for i := 0; i < n; i++ { - f := ewords[i] - switch { - case f == "{": // embedded go - if n < i+3 { - sh.AddError(fmt.Errorf("cosh: no matching right brace } found in exec command line")) - } else { - gstr := ewords[i+1] - etoks.AddTokens(sh.TranspileGo(sh.Tokens(gstr))) - etoks.Add(token.COMMA) - i += 2 - } - case f == "[": - noStop = true - case f == "]": // solo is def end - // just skip - noStop = false - case f == "&": - bgJob = true - case f[0] == '|': - execTok.Str = "Start" - etoks.Add(token.IDENT, AddQuotes(f)) - etoks.Add(token.COMMA) - endExec() - etoks.Add(token.SEMICOLON) - etoks.AddTokens(sh.TranspileExec(ewords[i+1:], output)) - return etoks - case f == ";": - endExec() - etoks.Add(token.SEMICOLON) - etoks.AddTokens(sh.TranspileExec(ewords[i+1:], output)) - return etoks - default: - if f[0] == '"' || f[0] == '`' { - etoks.Add(token.STRING, f) - } else { - etoks.Add(token.IDENT, AddQuotes(f)) // mark as an IDENT but add quotes! - } - etoks.Add(token.COMMA) - } - } - endExec() - return etoks -} diff --git a/shell/transpile_test.go b/shell/transpile_test.go deleted file mode 100644 index 94d07d470f..0000000000 --- a/shell/transpile_test.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package shell - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type exIn struct { - i string - e string -} - -type wexIn struct { - i string - isErr bool - e []string -} - -// these are more general tests of full-line statements of various forms -func TestExecWords(t *testing.T) { - tests := []wexIn{ - {`ls`, false, []string{`ls`}}, - {`cat "be"`, false, []string{`cat`, `"be"`}}, - {`cat "be`, true, []string{`cat`, `"be`}}, - {`cat "be a thing"`, false, []string{`cat`, `"be a thing"`}}, - {`cat "{be \"a\" thing}"`, false, []string{`cat`, `"{be \"a\" thing}"`}}, - {`cat {vals[1:10]}`, false, []string{`cat`, `{`, `vals[1:10]`, `}`}}, - {`cat {myfunc(vals[1:10], "test", false)}`, false, []string{`cat`, `{`, `myfunc(vals[1:10],"test",false)`, `}`}}, - {`cat vals[1:10]`, false, []string{`cat`, `vals[1:10]`}}, - {`cat vals...`, false, []string{`cat`, `vals...`}}, - {`[cat vals...]`, false, []string{`[`, `cat`, `vals...`, `]`}}, - {`[cat vals...]; ls *.tsv`, false, []string{`[`, `cat`, `vals...`, `]`, `;`, `ls`, `*.tsv`}}, - {`cat vals... 
| grep -v "b"`, false, []string{`cat`, `vals...`, `|`, `grep`, `-v`, `"b"`}}, - {`cat vals...>&file.out`, false, []string{`cat`, `vals...`, `>&`, `file.out`}}, - {`cat vals...>&@0:file.out`, false, []string{`cat`, `vals...`, `>&`, `@0:file.out`}}, - {`./"Cogent Code"`, false, []string{`./"Cogent Code"`}}, - {`Cogent\ Code`, false, []string{`Cogent Code`}}, - {`./Cogent\ Code`, false, []string{`./Cogent Code`}}, - } - for _, test := range tests { - o, err := ExecWords(test.i) - assert.Equal(t, test.e, o) - if err != nil { - if !test.isErr { - t.Error("should not have been an error:", test.i) - } - } else if test.isErr { - t.Error("was supposed to be an error:", test.i) - } - } -} - -// Paths tests the Path() code -func TestPaths(t *testing.T) { - // logx.UserLevel = slog.LevelDebug - tests := []exIn{ - {`fmt.Println("hi")`, `fmt.Println`}, - {"./cosh -i", `./cosh`}, - {"main.go", `main.go`}, - {"cogent/", `cogent/`}, - {`./"Cogent Code"`, `./\"Cogent Code\"`}, - {`Cogent\ Code`, ``}, - {`./Cogent\ Code`, `./Cogent Code`}, - {"./ios-deploy", `./ios-deploy`}, - {"ios_deploy/sub", `ios_deploy/sub`}, - {"C:/ios_deploy/sub", `C:/ios_deploy/sub`}, - {"..", `..`}, - {"../another/dir/to/go_to", `../another/dir/to/go_to`}, - {"../an-other/dir/", `../an-other/dir/`}, - {"https://google.com/search?q=hello%20world#body", `https://google.com/search?q=hello%20world#body`}, - } - sh := NewShell() - for _, test := range tests { - toks := sh.Tokens(test.i) - p, _ := toks.Path(false) - assert.Equal(t, test.e, p) - } -} - -// these are more general tests of full-line statements of various forms -func TestTranspile(t *testing.T) { - // logx.UserLevel = slog.LevelDebug - tests := []exIn{ - {"ls", `shell.Run("ls")`}, - {"`ls -la`", `shell.Run("ls", "-la")`}, - {"ls -la", `shell.Run("ls", "-la")`}, - {"ls --help", `shell.Run("ls", "--help")`}, - {"ls go", `shell.Run("ls", "go")`}, - {"cd go", `shell.Run("cd", "go")`}, - {`var name string`, `var name string`}, - {`name = "test"`, `name = "test"`}, - {`echo {name}`, `shell.Run("echo", name)`}, - {`echo "testing"`, `shell.Run("echo", "testing")`}, - {`number := 1.23`, `number := 1.23`}, - {`res1, res2 := FunTwoRet()`, `res1, res2 := FunTwoRet()`}, - {`res1, res2, res3 := FunThreeRet()`, `res1, res2, res3 := FunThreeRet()`}, - {`println("hi")`, `println("hi")`}, - {`fmt.Println("hi")`, `fmt.Println("hi")`}, - {`for i := 0; i < 3; i++ { fmt.Println(i, "\n")`, `for i := 0; i < 3; i++ { fmt.Println(i, "\n")`}, - {"for i, v := range `ls -la` {", `for i, v := range shell.Output("ls", "-la") {`}, - {`// todo: fixit`, `// todo: fixit`}, - {"`go build`", `shell.Run("go", "build")`}, - {"{go build()}", `go build()`}, - {"go build", `shell.Run("go", "build")`}, - {"go build()", `go build()`}, - {"go build &", `shell.Start("go", "build")`}, - {"[mkdir subdir]", `shell.RunErrOK("mkdir", "subdir")`}, - {"set something hello-1", `shell.Run("set", "something", "hello-1")`}, - {"set something = hello", `shell.Run("set", "something", "=", "hello")`}, - {`set something = "hello"`, `shell.Run("set", "something", "=", "hello")`}, - {`set something=hello`, `shell.Run("set", "something=hello")`}, - {`set "something=hello"`, `shell.Run("set", "something=hello")`}, - {`set something="hello"`, `shell.Run("set", "something=\"hello\"")`}, - {`add-path /opt/sbin /opt/homebrew/bin`, `shell.Run("add-path", "/opt/sbin", "/opt/homebrew/bin")`}, - {`cat file > test.out`, `shell.Run("cat", "file", ">", "test.out")`}, - {`cat file | grep -v exe > test.out`, `shell.Start("cat", "file", "|"); 
shell.Run("grep", "-v", "exe", ">", "test.out")`}, - {`cd sub; pwd; ls -la`, `shell.Run("cd", "sub"); shell.Run("pwd"); shell.Run("ls", "-la")`}, - {`cd sub; [mkdir sub]; ls -la`, `shell.Run("cd", "sub"); shell.RunErrOK("mkdir", "sub"); shell.Run("ls", "-la")`}, - {`cd sub; mkdir names[4]`, `shell.Run("cd", "sub"); shell.Run("mkdir", "names[4]")`}, - {"ls -la > test.out", `shell.Run("ls", "-la", ">", "test.out")`}, - {"ls > test.out", `shell.Run("ls", ">", "test.out")`}, - {"ls -la >test.out", `shell.Run("ls", "-la", ">", "test.out")`}, - {"ls -la >> test.out", `shell.Run("ls", "-la", ">>", "test.out")`}, - {"ls -la >& test.out", `shell.Run("ls", "-la", ">&", "test.out")`}, - {"ls -la >>& test.out", `shell.Run("ls", "-la", ">>&", "test.out")`}, - {"@1 ls -la", `shell.Run("@1", "ls", "-la")`}, - {"git switch main", `shell.Run("git", "switch", "main")`}, - {"git checkout 123abc", `shell.Run("git", "checkout", "123abc")`}, - {"go get cogentcore.org/core@main", `shell.Run("go", "get", "cogentcore.org/core@main")`}, - {"ls *.go", `shell.Run("ls", "*.go")`}, - {"ls ??.go", `shell.Run("ls", "??.go")`}, - {`fmt.Println("hi")`, `fmt.Println("hi")`}, - {"cosh -i", `shell.Run("cosh", "-i")`}, - {"./cosh -i", `shell.Run("./cosh", "-i")`}, - {"cat main.go", `shell.Run("cat", "main.go")`}, - {"cd cogent", `shell.Run("cd", "cogent")`}, - {"cd cogent/", `shell.Run("cd", "cogent/")`}, - {"echo $PATH", `shell.Run("echo", "$PATH")`}, - {`"./Cogent Code"`, `shell.Run("./Cogent Code")`}, - {`./"Cogent Code"`, `shell.Run("./\"Cogent Code\"")`}, - {`Cogent\ Code`, `shell.Run("Cogent Code")`}, - {`./Cogent\ Code`, `shell.Run("./Cogent Code")`}, - {`ios\ deploy -i`, `shell.Run("ios deploy", "-i")`}, - {"./ios-deploy -i", `shell.Run("./ios-deploy", "-i")`}, - {"ios_deploy -i tree_file", `shell.Run("ios_deploy", "-i", "tree_file")`}, - {"ios_deploy/sub -i tree_file", `shell.Run("ios_deploy/sub", "-i", "tree_file")`}, - {"C:/ios_deploy/sub -i tree_file", `shell.Run("C:/ios_deploy/sub", "-i", "tree_file")`}, - {"ios_deploy -i tree_file/path", `shell.Run("ios_deploy", "-i", "tree_file/path")`}, - {"ios-deploy -i", `shell.Run("ios-deploy", "-i")`}, - {"ios-deploy -i tree-file", `shell.Run("ios-deploy", "-i", "tree-file")`}, - {"ios-deploy -i tree-file/path/here", `shell.Run("ios-deploy", "-i", "tree-file/path/here")`}, - {"cd ..", `shell.Run("cd", "..")`}, - {"cd ../another/dir/to/go_to", `shell.Run("cd", "../another/dir/to/go_to")`}, - {"cd ../an-other/dir/", `shell.Run("cd", "../an-other/dir/")`}, - {"curl https://google.com/search?q=hello%20world#body", `shell.Run("curl", "https://google.com/search?q=hello%20world#body")`}, - {"func splitLines(str string) []string {", `splitLines := func(str string)[]string {`}, - {"type Result struct {", `type Result struct {`}, - {"var Jobs *table.Table", `var Jobs *table.Table`}, - {"type Result struct { JobID string", `type Result struct { JobID string`}, - {"type Result struct { JobID string `width:\"60\"`", "type Result struct { JobID string `width:\"60\"`"}, - {"func RunInExamples(fun func()) {", "RunInExamples := func(fun func()) {"}, - {"ctr++", "ctr++"}, - {"stru.ctr++", "stru.ctr++"}, - {"meta += ln", "meta += ln"}, - {"var data map[string]any", "var data map[string]any"}, - } - - sh := NewShell() - for _, test := range tests { - o := sh.TranspileLine(test.i) - assert.Equal(t, test.e, o) - } -} - -// tests command generation -func TestCommand(t *testing.T) { - // logx.UserLevel = slog.LevelDebug - tests := []exIn{ - { - `command list { -ls -la args... 
-}`, - `shell.AddCommand("list", func(args ...string) { -shell.Run("ls", "-la", "args...") -})`}, - } - - sh := NewShell() - for _, test := range tests { - sh.TranspileCode(test.i) - o := sh.Code() - assert.Equal(t, test.e, o) - } -} diff --git a/spell/dict/dtool.go b/spell/dict/dtool.go index 73d01cee20..84c66102d1 100644 --- a/spell/dict/dtool.go +++ b/spell/dict/dtool.go @@ -14,7 +14,7 @@ import ( //go:generate core generate -add-types -add-funcs -// Config is the configuration information for the cosh cli. +// Config is the configuration information for the dict cli. type Config struct { // InputA is the first input dictionary file diff --git a/spell/dict/typegen.go b/spell/dict/typegen.go index 7fcd3f4f0a..af1751e4be 100644 --- a/spell/dict/typegen.go +++ b/spell/dict/typegen.go @@ -6,7 +6,7 @@ import ( "cogentcore.org/core/types" ) -var _ = types.AddType(&types.Type{Name: "main.Config", IDName: "config", Doc: "Config is the configuration information for the cosh cli.", Directives: []types.Directive{{Tool: "go", Directive: "generate", Args: []string{"core", "generate", "-add-types", "-add-funcs"}}}, Fields: []types.Field{{Name: "InputA", Doc: "InputA is the first input dictionary file"}, {Name: "InputB", Doc: "InputB is the second input dictionary file"}, {Name: "Output", Doc: "Output is the output file for merge command"}}}) +var _ = types.AddType(&types.Type{Name: "main.Config", IDName: "config", Doc: "Config is the configuration information for the dict cli.", Directives: []types.Directive{{Tool: "go", Directive: "generate", Args: []string{"core", "generate", "-add-types", "-add-funcs"}}}, Fields: []types.Field{{Name: "InputA", Doc: "InputA is the first input dictionary file"}, {Name: "InputB", Doc: "InputB is the second input dictionary file"}, {Name: "Output", Doc: "Output is the output file for merge command"}}}) var _ = types.AddFunc(&types.Func{Name: "main.Compare", Doc: "Compare compares two dictionaries", Directives: []types.Directive{{Tool: "cli", Directive: "cmd", Args: []string{"-root"}}}, Args: []string{"c"}, Returns: []string{"error"}}) diff --git a/styles/font.go b/styles/font.go index 34bc3cd804..3712adc600 100644 --- a/styles/font.go +++ b/styles/font.go @@ -21,22 +21,24 @@ import ( // for rendering -- see [FontRender] for that. type Font struct { //types:add - // size of font to render (inherited); converted to points when getting font to use + // Size of font to render (inherited). + // Converted to points when getting font to use. Size units.Value - // font family (inherited): ordered list of comma-separated names from more general to more specific to use; use split on , to parse + // Family name for font (inherited): ordered list of comma-separated names + // from more general to more specific to use. Use split on, to parse. Family string - // style (inherited): normal, italic, etc + // Style (inherited): normal, italic, etc. Style FontStyles - // weight (inherited): normal, bold, etc + // Weight (inherited): normal, bold, etc. Weight FontWeights - // font stretch / condense options (inherited) + // Stretch / condense options (inherited). Stretch FontStretch - // normal or small caps (inherited) + // Variant specifies normal or small caps (inherited). Variant FontVariants // Decoration contains the bit flag [TextDecorations] @@ -45,15 +47,16 @@ type Font struct { //types:add // It is not inherited. Decoration TextDecorations - // super / sub script (not inherited) + // Shift is the super / sub script (not inherited). 
Shift BaselineShifts - // full font information including enhanced metrics and actual font codes for drawing text; this is a pointer into FontLibrary of loaded fonts + // Face has full font information including enhanced metrics and actual + // font codes for drawing text; this is a pointer into FontLibrary of loaded fonts. Face *FontFace `display:"-"` } func (fs *Font) Defaults() { - fs.Size = units.Dp(16) + fs.Size.Dp(16) } // InheritFields from parent diff --git a/styles/typegen.go b/styles/typegen.go index d0d3de6ca0..1264874c8c 100644 --- a/styles/typegen.go +++ b/styles/typegen.go @@ -10,7 +10,7 @@ var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/styles.Border", IDN var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/styles.Shadow", IDName: "shadow", Doc: "style parameters for shadows", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "OffsetX", Doc: "OffsetX is th horizontal offset of the shadow.\nPositive moves it right, negative moves it left."}, {Name: "OffsetY", Doc: "OffsetY is the vertical offset of the shadow.\nPositive moves it down, negative moves it up."}, {Name: "Blur", Doc: "Blur specifies the blur radius of the shadow.\nHigher numbers make it more blurry."}, {Name: "Spread", Doc: "Spread specifies the spread radius of the shadow.\nPositive numbers increase the size of the shadow,\nand negative numbers decrease the size."}, {Name: "Color", Doc: "Color specifies the color of the shadow."}, {Name: "Inset", Doc: "Inset specifies whether the shadow is inset within the\nbox instead of outset outside of the box.\nTODO: implement."}}}) -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/styles.Font", IDName: "font", Doc: "Font contains all font styling information.\nMost of font information is inherited.\nFont does not include all information needed\nfor rendering -- see [FontRender] for that.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "Size", Doc: "size of font to render (inherited); converted to points when getting font to use"}, {Name: "Family", Doc: "font family (inherited): ordered list of comma-separated names from more general to more specific to use; use split on , to parse"}, {Name: "Style", Doc: "style (inherited): normal, italic, etc"}, {Name: "Weight", Doc: "weight (inherited): normal, bold, etc"}, {Name: "Stretch", Doc: "font stretch / condense options (inherited)"}, {Name: "Variant", Doc: "normal or small caps (inherited)"}, {Name: "Decoration", Doc: "Decoration contains the bit flag [TextDecorations]\n(underline, line-through, etc). 
It must be set using\n[Font.SetDecoration] since it contains bit flags.\nIt is not inherited."}, {Name: "Shift", Doc: "super / sub script (not inherited)"}, {Name: "Face", Doc: "full font information including enhanced metrics and actual font codes for drawing text; this is a pointer into FontLibrary of loaded fonts"}}}) +var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/styles.Font", IDName: "font", Doc: "Font contains all font styling information.\nMost of font information is inherited.\nFont does not include all information needed\nfor rendering -- see [FontRender] for that.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "Size", Doc: "Size of font to render (inherited).\nConverted to points when getting font to use."}, {Name: "Family", Doc: "Family name for font (inherited): ordered list of comma-separated names\nfrom more general to more specific to use. Use split on, to parse."}, {Name: "Style", Doc: "Style (inherited): normal, italic, etc."}, {Name: "Weight", Doc: "Weight (inherited): normal, bold, etc."}, {Name: "Stretch", Doc: "Stretch / condense options (inherited)."}, {Name: "Variant", Doc: "Variant specifies normal or small caps (inherited)."}, {Name: "Decoration", Doc: "Decoration contains the bit flag [TextDecorations]\n(underline, line-through, etc). It must be set using\n[Font.SetDecoration] since it contains bit flags.\nIt is not inherited."}, {Name: "Shift", Doc: "Shift is the super / sub script (not inherited)."}, {Name: "Face", Doc: "Face has full font information including enhanced metrics and actual\nfont codes for drawing text; this is a pointer into FontLibrary of loaded fonts."}}}) var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/styles.FontRender", IDName: "font-render", Doc: "FontRender contains all font styling information\nthat is needed for SVG text rendering. It is passed to\nPaint and Style functions. It should typically not be\nused by end-user code -- see [Font] for that.\nIt stores all values as pointers so that they correspond\nto the values of the style object it was derived from.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Embeds: []types.Field{{Name: "Font"}}, Fields: []types.Field{{Name: "Color", Doc: "text color (inherited)"}, {Name: "Background", Doc: "background color (not inherited, transparent by default)"}, {Name: "Opacity", Doc: "alpha value between 0 and 1 to apply to the foreground and background of this element and all of its children"}}}) diff --git a/system/app.go b/system/app.go index dfb7d763cc..72f0a42e05 100644 --- a/system/app.go +++ b/system/app.go @@ -199,6 +199,9 @@ type App interface { // IsDark returns whether the system color theme is dark (as oppposed to light). IsDark() bool + + // GPUDevice returns the gpu.GPU device if it is present (else nil). 
+ GPUDevice() any } // OnSystemWindowCreated is a channel used to communicate that the underlying diff --git a/system/driver/base/app.go b/system/driver/base/app.go index 11969dcdbf..05a6bd2806 100644 --- a/system/driver/base/app.go +++ b/system/driver/base/app.go @@ -182,6 +182,10 @@ func (a *App) IsDark() bool { return a.Dark } +func (a *App) GPUDevice() any { + return nil +} + func (a *App) GetScreens() { // no-op by default } diff --git a/system/driver/desktop/app.go b/system/driver/desktop/app.go index 8a184b8f8b..6deb0ecba8 100644 --- a/system/driver/desktop/app.go +++ b/system/driver/desktop/app.go @@ -51,6 +51,10 @@ func (a *App) SendEmptyEvent() { glfw.PostEmptyEvent() } +func (a *App) GPUDevice() any { + return a.GPU +} + // MainLoop starts running event loop on main thread (must be called // from the main thread). func (a *App) MainLoop() { diff --git a/system/driver/web/app.go b/system/driver/web/app.go index d87bcc4127..47c2a13f53 100644 --- a/system/driver/web/app.go +++ b/system/driver/web/app.go @@ -158,6 +158,10 @@ func (a *App) Resize() { a.Event.WindowResize() } +func (a *App) GPUDevice() any { + return a.Draw.wgpu +} + func (a *App) DataDir() string { return "/home/me/.data" } diff --git a/system/window.go b/system/window.go index 1bc84d796b..50ef38f542 100644 --- a/system/window.go +++ b/system/window.go @@ -329,6 +329,12 @@ func (o *NewWindowOptions) Fixup() { sc := TheApp.Screen(o.Screen) scsz := sc.Geometry.Size() // window coords size + if o.Flags.HasFlag(Fullscreen) { + o.Size = sc.PixelSize + o.Pos = image.Point{} + return + } + if o.Size.X <= 0 { o.StdPixels = false o.Size.X = int(0.8 * float32(scsz.X) * sc.DevicePixelRatio) diff --git a/tensor/README.md b/tensor/README.md deleted file mode 100644 index 4dadca823b..0000000000 --- a/tensor/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Tensor - -Tensor and related sub-packages provide a simple yet powerful framework for representing n-dimensional data of various types, providing similar functionality to the widely used `numpy` and `pandas` libraries in python, and the commercial MATLAB framework. - -* [table](table) organizes multiple Tensors as columns in a data `Table`, aligned by a common row dimension as the outer-most dimension of each tensor. Because the columns are tensors, each cell (value associated with a given row) can also be n-dimensional, allowing efficient representation of patterns and other high-dimensional data. Furthermore, the entire column is organized as a single contiguous slice of data, so it can be efficiently processed. The `table` package also has an `IndexView` that provides an indexed view into the rows of the table for highly efficient filtering and sorting of data. - - Data that is encoded as a slice of `struct`s can be bidirectionally converted to / from a Table, which then provides more powerful sorting, filtering and other functionality, including the plotcore. - -* [tensorcore](tensorcore) provides core widgets for the `Tensor` and `Table` data. - -* [stats](stats) implements a number of different ways of analyzing tensor and table data. - -* [plot/plotcore](../plot/plotcore) supports interactive plotting of `Table` data. - - -# History - -This package was originally developed as [etable](https://github.com/emer/etable) as part of the _emergent_ software framework. 
It always depended on the GUI framework that became Cogent Core, and having it integrated within the Core monorepo makes it easier to integrate updates, and also makes it easier to build advanced data management and visualization applications. For example, the [plot/plotcore](../plot/plotcore) package uses the `Table` to support flexible and powerful plotting functionality. - diff --git a/tensor/base.go b/tensor/base.go deleted file mode 100644 index 8bad3301f1..0000000000 --- a/tensor/base.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tensor - -import ( - "fmt" - "log" - "reflect" - "unsafe" - - "cogentcore.org/core/base/reflectx" - "cogentcore.org/core/base/slicesx" -) - -// Base is an n-dim array of float64s. -type Base[T any] struct { - Shp Shape - Values []T - Meta map[string]string -} - -// Shape returns a pointer to the shape that fully parametrizes the tensor shape -func (tsr *Base[T]) Shape() *Shape { return &tsr.Shp } - -// Len returns the number of elements in the tensor (product of shape dimensions). -func (tsr *Base[T]) Len() int { return tsr.Shp.Len() } - -// NumDims returns the total number of dimensions. -func (tsr *Base[T]) NumDims() int { return tsr.Shp.NumDims() } - -// DimSize returns size of given dimension -func (tsr *Base[T]) DimSize(dim int) int { return tsr.Shp.DimSize(dim) } - -// RowCellSize returns the size of the outer-most Row shape dimension, -// and the size of all the remaining inner dimensions (the "cell" size). -// Used for Tensors that are columns in a data table. -func (tsr *Base[T]) RowCellSize() (rows, cells int) { - return tsr.Shp.RowCellSize() -} - -// DataType returns the type of the data elements in the tensor. -// Bool is returned for the Bits tensor type. -func (tsr *Base[T]) DataType() reflect.Kind { - var v T - return reflect.TypeOf(v).Kind() -} - -func (tsr *Base[T]) Sizeof() int64 { - var v T - return int64(unsafe.Sizeof(v)) * int64(tsr.Len()) -} - -func (tsr *Base[T]) Bytes() []byte { - return slicesx.ToBytes(tsr.Values) -} - -func (tsr *Base[T]) Value(i []int) T { j := tsr.Shp.Offset(i); return tsr.Values[j] } -func (tsr *Base[T]) Value1D(i int) T { return tsr.Values[i] } -func (tsr *Base[T]) Set(i []int, val T) { j := tsr.Shp.Offset(i); tsr.Values[j] = val } -func (tsr *Base[T]) Set1D(i int, val T) { tsr.Values[i] = val } - -// SetShape sets the shape params, resizing backing storage appropriately -func (tsr *Base[T]) SetShape(sizes []int, names ...string) { - tsr.Shp.SetShape(sizes, names...) - nln := tsr.Len() - tsr.Values = slicesx.SetLength(tsr.Values, nln) -} - -// SetNumRows sets the number of rows (outer-most dimension) in a RowMajor organized tensor. -func (tsr *Base[T]) SetNumRows(rows int) { - rows = max(1, rows) // must be > 0 - _, cells := tsr.Shp.RowCellSize() - nln := rows * cells - tsr.Shp.Sizes[0] = rows - tsr.Values = slicesx.SetLength(tsr.Values, nln) -} - -// subSpaceImpl returns a new tensor with innermost subspace at given -// offset(s) in outermost dimension(s) (len(offs) < NumDims). -// The new tensor points to the values of the this tensor (i.e., modifications -// will affect both), as its Values slice is a view onto the original (which -// is why only inner-most contiguous supsaces are supported). -// Use Clone() method to separate the two. 
-func (tsr *Base[T]) subSpaceImpl(offs []int) *Base[T] { - nd := tsr.NumDims() - od := len(offs) - if od >= nd { - return nil - } - stsr := &Base[T]{} - stsr.SetShape(tsr.Shp.Sizes[od:], tsr.Shp.Names[od:]...) - sti := make([]int, nd) - copy(sti, offs) - stoff := tsr.Shp.Offset(sti) - sln := stsr.Len() - stsr.Values = tsr.Values[stoff : stoff+sln] - return stsr -} - -func (tsr *Base[T]) StringValue(i []int) string { - j := tsr.Shp.Offset(i) - return reflectx.ToString(tsr.Values[j]) -} -func (tsr *Base[T]) String1D(off int) string { return reflectx.ToString(tsr.Values[off]) } - -func (tsr *Base[T]) StringRowCell(row, cell int) string { - _, sz := tsr.Shp.RowCellSize() - return reflectx.ToString(tsr.Values[row*sz+cell]) -} - -// Label satisfies the core.Labeler interface for a summary description of the tensor -func (tsr *Base[T]) Label() string { - return fmt.Sprintf("Tensor: %s", tsr.Shp.String()) -} - -// Dims is the gonum/mat.Matrix interface method for returning the dimensionality of the -// 2D Matrix. Assumes Row-major ordering and logs an error if NumDims < 2. -func (tsr *Base[T]) Dims() (r, c int) { - nd := tsr.NumDims() - if nd < 2 { - log.Println("tensor Dims gonum Matrix call made on Tensor with dims < 2") - return 0, 0 - } - return tsr.Shp.DimSize(nd - 2), tsr.Shp.DimSize(nd - 1) -} - -// Symmetric is the gonum/mat.Matrix interface method for returning the dimensionality of a symmetric -// 2D Matrix. -func (tsr *Base[T]) Symmetric() (r int) { - nd := tsr.NumDims() - if nd < 2 { - log.Println("tensor Symmetric gonum Matrix call made on Tensor with dims < 2") - return 0 - } - if tsr.Shp.DimSize(nd-2) != tsr.Shp.DimSize(nd-1) { - log.Println("tensor Symmetric gonum Matrix call made on Tensor that is not symmetric") - return 0 - } - return tsr.Shp.DimSize(nd - 1) -} - -// SymmetricDim returns the number of rows/columns in the matrix. -func (tsr *Base[T]) SymmetricDim() int { - nd := tsr.NumDims() - if nd < 2 { - log.Println("tensor Symmetric gonum Matrix call made on Tensor with dims < 2") - return 0 - } - if tsr.Shp.DimSize(nd-2) != tsr.Shp.DimSize(nd-1) { - log.Println("tensor Symmetric gonum Matrix call made on Tensor that is not symmetric") - return 0 - } - return tsr.Shp.DimSize(nd - 1) -} - -// SetMetaData sets a key=value meta data (stored as a map[string]string). -// For TensorGrid display: top-zero=+/-, odd-row=+/-, image=+/-, -// min, max set fixed min / max values, background=color -func (tsr *Base[T]) SetMetaData(key, val string) { - if tsr.Meta == nil { - tsr.Meta = make(map[string]string) - } - tsr.Meta[key] = val -} - -// MetaData retrieves value of given key, bool = false if not set -func (tsr *Base[T]) MetaData(key string) (string, bool) { - if tsr.Meta == nil { - return "", false - } - val, ok := tsr.Meta[key] - return val, ok -} - -// MetaDataMap returns the underlying map used for meta data -func (tsr *Base[T]) MetaDataMap() map[string]string { - return tsr.Meta -} - -// CopyMetaData copies meta data from given source tensor -func (tsr *Base[T]) CopyMetaData(frm Tensor) { - fmap := frm.MetaDataMap() - if len(fmap) == 0 { - return - } - if tsr.Meta == nil { - tsr.Meta = make(map[string]string) - } - for k, v := range fmap { - tsr.Meta[k] = v - } -} diff --git a/tensor/bits.go b/tensor/bits.go deleted file mode 100644 index 76a15bfd39..0000000000 --- a/tensor/bits.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
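The RowCellSize, FloatRowCell, and StringRowCell methods in the Base implementation deleted above all share one row-major convention: the outermost dimension is the row, every remaining dimension collapses into one contiguous cell, and the flat index of (row, cell) is row*cells + cell. A small self-contained sketch of that arithmetic (the shape values here are made up for illustration):

```go
package main

import "fmt"

func main() {
	// Shape [3, 2, 4]: 3 rows, and each cell holds 2*4 = 8 contiguous values.
	sizes := []int{3, 2, 4}
	cells := 1
	for _, s := range sizes[1:] {
		cells *= s
	}
	row, cell := 2, 5
	flat := row*cells + cell // the index used by the *RowCell accessors
	fmt.Println(cells, flat) // prints: 8 21
}
```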
- -package tensor - -import ( - "fmt" - "log/slog" - "reflect" - "strings" - - "cogentcore.org/core/base/reflectx" - "cogentcore.org/core/base/slicesx" - "cogentcore.org/core/tensor/bitslice" - "gonum.org/v1/gonum/mat" -) - -// Bits is a tensor of bits backed by a bitslice.Slice for efficient storage -// of binary data -type Bits struct { - Shp Shape - Values bitslice.Slice - Meta map[string]string -} - -// NewBits returns a new n-dimensional tensor of bit values -// with the given sizes per dimension (shape), and optional dimension names. -func NewBits(sizes []int, names ...string) *Bits { - tsr := &Bits{} - tsr.SetShape(sizes, names...) - tsr.Values = bitslice.Make(tsr.Len(), 0) - return tsr -} - -// NewBitsShape returns a new n-dimensional tensor of bit values -// using given shape. -func NewBitsShape(shape *Shape) *Bits { - tsr := &Bits{} - tsr.Shp.CopyShape(shape) - tsr.Values = bitslice.Make(tsr.Len(), 0) - return tsr -} - -func Float64ToBool(val float64) bool { - bv := true - if val == 0 { - bv = false - } - return bv -} - -func BoolToFloat64(bv bool) float64 { - if bv { - return 1 - } else { - return 0 - } -} - -func (tsr *Bits) IsString() bool { - return false -} - -// DataType returns the type of the data elements in the tensor. -// Bool is returned for the Bits tensor type. -func (tsr *Bits) DataType() reflect.Kind { - return reflect.Bool -} - -func (tsr *Bits) Sizeof() int64 { - return int64(len(tsr.Values)) -} - -func (tsr *Bits) Bytes() []byte { - return slicesx.ToBytes(tsr.Values) -} - -// Shape returns a pointer to the shape that fully parametrizes the tensor shape -func (tsr *Bits) Shape() *Shape { return &tsr.Shp } - -// Len returns the number of elements in the tensor (product of shape dimensions). -func (tsr *Bits) Len() int { return tsr.Shp.Len() } - -// NumDims returns the total number of dimensions. -func (tsr *Bits) NumDims() int { return tsr.Shp.NumDims() } - -// DimSize returns size of given dimension -func (tsr *Bits) DimSize(dim int) int { return tsr.Shp.DimSize(dim) } - -// RowCellSize returns the size of the outer-most Row shape dimension, -// and the size of all the remaining inner dimensions (the "cell" size). -// Used for Tensors that are columns in a data table. -func (tsr *Bits) RowCellSize() (rows, cells int) { - return tsr.Shp.RowCellSize() -} - -// Value returns value at given tensor index -func (tsr *Bits) Value(i []int) bool { j := int(tsr.Shp.Offset(i)); return tsr.Values.Index(j) } - -// Value1D returns value at given tensor 1D (flat) index -func (tsr *Bits) Value1D(i int) bool { return tsr.Values.Index(i) } - -func (tsr *Bits) Set(i []int, val bool) { j := int(tsr.Shp.Offset(i)); tsr.Values.Set(j, val) } -func (tsr *Bits) Set1D(i int, val bool) { tsr.Values.Set(i, val) } - -// SetShape sets the shape params, resizing backing storage appropriately -func (tsr *Bits) SetShape(sizes []int, names ...string) { - tsr.Shp.SetShape(sizes, names...) - nln := tsr.Len() - tsr.Values.SetLen(nln) -} - -// SetNumRows sets the number of rows (outer-most dimension) in a RowMajor organized tensor. 
-func (tsr *Bits) SetNumRows(rows int) { - rows = max(1, rows) // must be > 0 - _, cells := tsr.Shp.RowCellSize() - nln := rows * cells - tsr.Shp.Sizes[0] = rows - tsr.Values.SetLen(nln) -} - -// SubSpace is not possible with Bits -func (tsr *Bits) SubSpace(offs []int) Tensor { - return nil -} - -func (tsr *Bits) Float(i []int) float64 { - j := tsr.Shp.Offset(i) - return BoolToFloat64(tsr.Values.Index(j)) -} - -func (tsr *Bits) SetFloat(i []int, val float64) { - j := tsr.Shp.Offset(i) - tsr.Values.Set(j, Float64ToBool(val)) -} - -func (tsr *Bits) StringValue(i []int) string { - j := tsr.Shp.Offset(i) - return reflectx.ToString(tsr.Values.Index(j)) -} - -func (tsr *Bits) SetString(i []int, val string) { - if bv, err := reflectx.ToBool(val); err == nil { - j := tsr.Shp.Offset(i) - tsr.Values.Set(j, bv) - } -} - -func (tsr *Bits) Float1D(off int) float64 { - return BoolToFloat64(tsr.Values.Index(off)) -} -func (tsr *Bits) SetFloat1D(off int, val float64) { - tsr.Values.Set(off, Float64ToBool(val)) -} - -func (tsr *Bits) FloatRowCell(row, cell int) float64 { - _, sz := tsr.RowCellSize() - return BoolToFloat64(tsr.Values.Index(row*sz + cell)) -} -func (tsr *Bits) SetFloatRowCell(row, cell int, val float64) { - _, sz := tsr.RowCellSize() - tsr.Values.Set(row*sz+cell, Float64ToBool(val)) -} - -func (tsr *Bits) Floats(flt *[]float64) { - sz := tsr.Len() - *flt = slicesx.SetLength(*flt, sz) - for j := 0; j < sz; j++ { - (*flt)[j] = BoolToFloat64(tsr.Values.Index(j)) - } -} - -// SetFloats sets tensor values from a []float64 slice (copies values). -func (tsr *Bits) SetFloats(vals []float64) { - sz := min(tsr.Len(), len(vals)) - for j := 0; j < sz; j++ { - tsr.Values.Set(j, Float64ToBool(vals[j])) - } -} - -func (tsr *Bits) String1D(off int) string { - return reflectx.ToString(tsr.Values.Index(off)) -} - -func (tsr *Bits) SetString1D(off int, val string) { - if bv, err := reflectx.ToBool(val); err == nil { - tsr.Values.Set(off, bv) - } -} - -func (tsr *Bits) StringRowCell(row, cell int) string { - _, sz := tsr.RowCellSize() - return reflectx.ToString(tsr.Values.Index(row*sz + cell)) -} - -func (tsr *Bits) SetStringRowCell(row, cell int, val string) { - if bv, err := reflectx.ToBool(val); err == nil { - _, sz := tsr.RowCellSize() - tsr.Values.Set(row*sz+cell, bv) - } -} - -// Label satisfies the core.Labeler interface for a summary description of the tensor -func (tsr *Bits) Label() string { - return fmt.Sprintf("tensor.Bits: %s", tsr.Shp.String()) -} - -// SetMetaData sets a key=value meta data (stored as a map[string]string). 
-// For TensorGrid display: top-zero=+/-, odd-row=+/-, image=+/-, -// min, max set fixed min / max values, background=color -func (tsr *Bits) SetMetaData(key, val string) { - if tsr.Meta == nil { - tsr.Meta = make(map[string]string) - } - tsr.Meta[key] = val -} - -// MetaData retrieves value of given key, bool = false if not set -func (tsr *Bits) MetaData(key string) (string, bool) { - if tsr.Meta == nil { - return "", false - } - val, ok := tsr.Meta[key] - return val, ok -} - -// MetaDataMap returns the underlying map used for meta data -func (tsr *Bits) MetaDataMap() map[string]string { - return tsr.Meta -} - -// CopyMetaData copies meta data from given source tensor -func (tsr *Bits) CopyMetaData(frm Tensor) { - fmap := frm.MetaDataMap() - if len(fmap) == 0 { - return - } - if tsr.Meta == nil { - tsr.Meta = make(map[string]string) - } - for k, v := range fmap { - tsr.Meta[k] = v - } -} - -// Range is not applicable to Bits tensor -func (tsr *Bits) Range() (min, max float64, minIndex, maxIndex int) { - minIndex = -1 - maxIndex = -1 - return -} - -// SetZeros is simple convenience function initialize all values to 0 -func (tsr *Bits) SetZeros() { - ln := tsr.Len() - for j := 0; j < ln; j++ { - tsr.Values.Set(j, false) - } -} - -// Clone clones this tensor, creating a duplicate copy of itself with its -// own separate memory representation of all the values, and returns -// that as a Tensor (which can be converted into the known type as needed). -func (tsr *Bits) Clone() Tensor { - csr := NewBitsShape(&tsr.Shp) - csr.Values = tsr.Values.Clone() - return csr -} - -// CopyFrom copies all avail values from other tensor into this tensor, with an -// optimized implementation if the other tensor is of the same type, and -// otherwise it goes through appropriate standard type. -func (tsr *Bits) CopyFrom(frm Tensor) { - if fsm, ok := frm.(*Bits); ok { - copy(tsr.Values, fsm.Values) - return - } - sz := min(len(tsr.Values), frm.Len()) - for i := 0; i < sz; i++ { - tsr.Values.Set(i, Float64ToBool(frm.Float1D(i))) - } -} - -// CopyShapeFrom copies just the shape from given source tensor -// calling SetShape with the shape params from source (see for more docs). -func (tsr *Bits) CopyShapeFrom(frm Tensor) { - tsr.SetShape(frm.Shape().Sizes, frm.Shape().Names...) -} - -// CopyCellsFrom copies given range of values from other tensor into this tensor, -// using flat 1D indexes: to = starting index in this Tensor to start copying into, -// start = starting index on from Tensor to start copying from, and n = number of -// values to copy. Uses an optimized implementation if the other tensor is -// of the same type, and otherwise it goes through appropriate standard type. -func (tsr *Bits) CopyCellsFrom(frm Tensor, to, start, n int) { - if fsm, ok := frm.(*Bits); ok { - for i := 0; i < n; i++ { - tsr.Values.Set(to+i, fsm.Values.Index(start+i)) - } - return - } - for i := 0; i < n; i++ { - tsr.Values.Set(to+i, Float64ToBool(frm.Float1D(start+i))) - } -} - -// Dims is the gonum/mat.Matrix interface method for returning the dimensionality of the -// 2D Matrix. Not supported for Bits -- do not call! -func (tsr *Bits) Dims() (r, c int) { - slog.Error("tensor Dims gonum Matrix call made on Bits Tensor; not supported") - return 0, 0 -} - -// At is the gonum/mat.Matrix interface method for returning 2D matrix element at given -// row, column index. Not supported for Bits -- do not call! 
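The Bits tensor deleted above stores boolean values in a bitslice but still exposes the float accessors used throughout the tensor package, bridging through BoolToFloat64 and Float64ToBool (false maps to 0, true to any nonzero value). A short sketch against that now-removed API, shown only to document the behavior described in its comments:

```go
package main

import (
	"fmt"

	"cogentcore.org/core/tensor"
)

func main() {
	bt := tensor.NewBits([]int{2, 3}) // 2x3 tensor of bits, all false
	bt.SetFloat1D(4, 1)               // any nonzero float stores true
	// Value1D reads the raw bool; Float1D maps false/true back to 0/1.
	fmt.Println(bt.Value1D(4), bt.Float1D(0), bt.Float1D(4)) // true 0 1
}
```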
-func (tsr *Bits) At(i, j int) float64 { - slog.Error("tensor At gonum Matrix call made on Bits Tensor; not supported") - return 0 -} - -// T is the gonum/mat.Matrix transpose method. -// Not supported for Bits -- do not call! -func (tsr *Bits) T() mat.Matrix { - slog.Error("tensor T gonum Matrix call made on Bits Tensor; not supported") - return mat.Transpose{tsr} -} - -// String satisfies the fmt.Stringer interface for string of tensor data -func (tsr *Bits) String() string { - str := tsr.Label() - sz := tsr.Len() - if sz > 1000 { - return str - } - var b strings.Builder - b.WriteString(str) - b.WriteString("\n") - oddRow := true - rows, cols, _, _ := Projection2DShape(&tsr.Shp, oddRow) - for r := 0; r < rows; r++ { - rc, _ := Projection2DCoords(&tsr.Shp, oddRow, r, 0) - b.WriteString(fmt.Sprintf("%v: ", rc)) - for c := 0; c < cols; c++ { - vl := Projection2DValue(tsr, oddRow, r, c) - b.WriteString(fmt.Sprintf("%g ", vl)) - } - b.WriteString("\n") - } - return b.String() -} diff --git a/tensor/bitslice/bitslice.go b/tensor/bitslice/bitslice.go deleted file mode 100644 index eaefa05d9b..0000000000 --- a/tensor/bitslice/bitslice.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright (c) 2024, The Cogent Core Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bitslice implements a simple slice-of-bits using a []byte slice for storage, -// which is used for efficient storage of boolean data, such as projection connectivity patterns. -package bitslice - -import "fmt" - -// bitslice.Slice is the slice of []byte that holds the bits. -// first byte maintains the number of bits used in the last byte (0-7). -// when 0 then prior byte is all full and a new one must be added for append. -type Slice []byte - -// BitIndex returns the byte, bit index of given bit index -func BitIndex(idx int) (byte int, bit uint32) { - return idx / 8, uint32(idx % 8) -} - -// Make makes a new bitslice of given length and capacity (optional, pass 0 for default) -// *bits* (rounds up 1 for both). 
-// also reserves first byte for extra bits value -func Make(ln, cp int) Slice { - by, bi := BitIndex(ln) - bln := by - if bi != 0 { - bln++ - } - var sl Slice - if cp > 0 { - sl = make(Slice, bln+1, (cp/8)+2) - } else { - sl = make(Slice, bln+1) - } - sl[0] = byte(bi) - return sl -} - -// Len returns the length of the slice in bits -func (bs *Slice) Len() int { - ln := len(*bs) - if ln == 0 { - return 0 - } - eb := (*bs)[0] - bln := ln - 1 - if eb != 0 { - bln-- - } - tln := bln*8 + int(eb) - return tln -} - -// Cap returns the capacity of the slice in bits -- always modulo 8 -func (bs *Slice) Cap() int { - return (cap(*bs) - 1) * 8 -} - -// SetLen sets the length of the slice, copying values if a new allocation is required -func (bs *Slice) SetLen(ln int) { - by, bi := BitIndex(ln) - bln := by - if bi != 0 { - bln++ - } - if cap(*bs) >= bln+1 { - *bs = (*bs)[0 : bln+1] - (*bs)[0] = byte(bi) - } else { - sl := make(Slice, bln+1) - sl[0] = byte(bi) - copy(sl, *bs) - *bs = sl - } -} - -// Set sets value of given bit index -- no extra range checking is performed -- will panic if out of range -func (bs *Slice) Set(idx int, val bool) { - by, bi := BitIndex(idx) - if val { - (*bs)[by+1] |= 1 << bi - } else { - (*bs)[by+1] &^= 1 << bi - } -} - -// Index returns bit value at given bit index -func (bs *Slice) Index(idx int) bool { - by, bi := BitIndex(idx) - return ((*bs)[by+1] & (1 << bi)) != 0 -} - -// Append adds a bit to the slice and returns possibly new slice, possibly old slice.. -func (bs *Slice) Append(val bool) Slice { - if len(*bs) == 0 { - *bs = Make(1, 0) - bs.Set(0, val) - return *bs - } - ln := bs.Len() - eb := (*bs)[0] - if eb == 0 { - *bs = append(*bs, 0) // now we add - (*bs)[0] = 1 - } else if eb < 7 { - (*bs)[0]++ - } else { - (*bs)[0] = 0 - } - bs.Set(ln, val) - return *bs -} - -// SetAll sets all values to either on or off -- much faster than setting individual bits -func (bs *Slice) SetAll(val bool) { - ln := len(*bs) - for i := 1; i < ln; i++ { - if val { - (*bs)[i] = 0xFF - } else { - (*bs)[i] = 0 - } - } -} - -// ToBools converts to a []bool slice -func (bs *Slice) ToBools() []bool { - ln := len(*bs) - bb := make([]bool, ln) - for i := 0; i < ln; i++ { - bb[i] = bs.Index(i) - } - return bb -} - -// Clone creates a new copy of this bitslice with separate memory -func (bs *Slice) Clone() Slice { - cp := make(Slice, len(*bs)) - copy(cp, *bs) - return cp -} - -// SubSlice returns a new Slice from given start, end range indexes of this slice -// if end is <= 0 then the length of the source slice is used (equivalent to omitting -// the number after the : in a Go subslice expression) -func (bs *Slice) SubSlice(start, end int) Slice { - ln := bs.Len() - if end <= 0 { - end = ln - } - if end > ln { - panic("bitslice.SubSlice: end index is beyond length of slice") - } - if start > end { - panic("bitslice.SubSlice: start index greater than end index") - } - nln := end - start - if nln <= 0 { - return Slice{} - } - ss := Make(nln, 0) - for i := 0; i < nln; i++ { - ss.Set(i, bs.Index(i+start)) - } - return ss -} - -// Delete returns a new bit slice with N elements removed starting at given index. -// This must be a copy given the nature of the 8-bit aliasing. 
-func (bs *Slice) Delete(start, n int) Slice { - ln := bs.Len() - if n <= 0 { - panic("bitslice.Delete: n <= 0") - } - if start >= ln { - panic("bitslice.Delete: start index >= length") - } - end := start + n - if end > ln { - panic("bitslice.Delete: end index greater than length") - } - nln := ln - n - if nln <= 0 { - return Slice{} - } - ss := Make(nln, 0) - for i := 0; i < start; i++ { - ss.Set(i, bs.Index(i)) - } - for i := end; i < ln; i++ { - ss.Set(i-n, bs.Index(i)) - } - return ss -} - -// Insert returns a new bit slice with N false elements inserted starting at given index. -// This must be a copy given the nature of the 8-bit aliasing. -func (bs *Slice) Insert(start, n int) Slice { - ln := bs.Len() - if n <= 0 { - panic("bitslice.Insert: n <= 0") - } - if start > ln { - panic("bitslice.Insert: start index greater than length") - } - nln := ln + n - ss := Make(nln, 0) - for i := 0; i < start; i++ { - ss.Set(i, bs.Index(i)) - } - for i := start; i < ln; i++ { - ss.Set(i+n, bs.Index(i)) - } - return ss -} - -// String satisfies the fmt.Stringer interface -func (bs *Slice) String() string { - ln := bs.Len() - if ln == 0 { - if *bs == nil { - return "nil" - } - return "[]" - } - mx := ln - if mx > 1000 { - mx = 1000 - } - str := "[" - for i := 0; i < mx; i++ { - val := bs.Index(i) - if val { - str += "1 " - } else { - str += "0 " - } - if (i+1)%80 == 0 { - str += "\n" - } - } - if ln > mx { - str += fmt.Sprintf("...(len=%v)", ln) - } - str += "]" - return str -} diff --git a/tensor/bitslice/bitslice_test.go b/tensor/bitslice/bitslice_test.go deleted file mode 100644 index ceadc96cc6..0000000000 --- a/tensor/bitslice/bitslice_test.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (c) 2024, The Cogent Core Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
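The bitslice package removed above packs bits into a plain []byte, reserving the first byte to record how many bits of the final byte are in use; Make, Set, Index, Append, and Len are the core operations. A brief usage sketch against that pre-removal API, to document how it behaved:

```go
package main

import (
	"fmt"

	"cogentcore.org/core/tensor/bitslice"
)

func main() {
	bs := bitslice.Make(10, 0) // 10 bits, default capacity
	bs.Set(3, true)
	bs = bs.Append(true) // Append may reallocate, so keep the returned slice
	fmt.Println(bs.Len(), bs.Index(3), bs.Index(10)) // prints: 11 true true
}
```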
- -package bitslice - -import ( - "testing" -) - -func TestBitSlice10(t *testing.T) { - bs := Make(10, 0) - - ln := bs.Len() - if ln != 10 { - t.Errorf("len: %v != 10\n", ln) - } - - // fmt.Printf("empty: %v\n", bs.String()) - var ex, out string - ex = "[0 0 0 0 0 0 0 0 0 0 ]" - out = bs.String() - if out != ex { - t.Errorf("empty != %v", out) - } - - bs.Set(2, true) - // fmt.Printf("2=true: %v\n", bs.String()) - ex = "[0 0 1 0 0 0 0 0 0 0 ]" - out = bs.String() - if out != ex { - t.Errorf("2=true != %v", out) - } - - bs.Set(4, true) - // fmt.Printf("4=true: %v\n", bs.String()) - ex = "[0 0 1 0 1 0 0 0 0 0 ]" - out = bs.String() - if out != ex { - t.Errorf("4=true != %v", out) - } - - bs.Set(9, true) - // fmt.Printf("9=true: %v\n", bs.String()) - ex = "[0 0 1 0 1 0 0 0 0 1 ]" - out = bs.String() - if out != ex { - t.Errorf("9=true != %v", out) - } - - bs.Append(true) - // fmt.Printf("append true: %v\n", bs.String()) - ex = "[0 0 1 0 1 0 0 0 0 1 1 ]" - out = bs.String() - if out != ex { - t.Errorf("append true != %v", out) - } - - bs.Append(false) - // fmt.Printf("append false: %v\n", bs.String()) - ex = "[0 0 1 0 1 0 0 0 0 1 1 0 ]" - out = bs.String() - if out != ex { - t.Errorf("append false != %v", out) - } - - ss := bs.SubSlice(2, 6) - // fmt.Printf("subslice: %v\n", ss.String()) - ex = "[1 0 1 0 ]" - out = ss.String() - if out != ex { - t.Errorf("subslice[2,6] != %v", out) - } - - ds := bs.Delete(2, 4) - // fmt.Printf("delete: %v\n", ds.String()) - ex = "[0 0 0 0 0 1 1 0 ]" - out = ds.String() - if out != ex { - t.Errorf("Delete(2,4) != %v", out) - } - - is := bs.Insert(3, 2) - // fmt.Printf("insert: %v\n", is.String()) - ex = "[0 0 1 0 0 0 1 0 0 0 0 1 1 0 ]" - out = is.String() - if out != ex { - t.Errorf("Insert(3,2) != %v", out) - } -} - -func TestBitSlice8(t *testing.T) { - bs := Make(8, 0) - - ln := bs.Len() - if ln != 8 { - t.Errorf("len: %v != 8\n", ln) - } - - // fmt.Printf("empty: %v\n", bs.String()) - var ex, out string - ex = "[0 0 0 0 0 0 0 0 ]" - out = bs.String() - if out != ex { - t.Errorf("empty != %v", out) - } - - bs.Append(true) - ln = bs.Len() - if ln != 9 { - t.Errorf("len: %v != 9\n", ln) - } - // fmt.Printf("append true: %v\n", bs.String()) - ex = "[0 0 0 0 0 0 0 0 1 ]" - out = bs.String() - if out != ex { - t.Errorf("append true != %v", out) - } - - bs.Append(false) - ln = bs.Len() - if ln != 10 { - t.Errorf("len: %v != 10\n", ln) - } - // fmt.Printf("append false: %v\n", bs.String()) - ex = "[0 0 0 0 0 0 0 0 1 0 ]" - out = bs.String() - if out != ex { - t.Errorf("append false != %v", out) - } -} - -func TestBitSlice7(t *testing.T) { - bs := Make(7, 0) - - ln := bs.Len() - if ln != 7 { - t.Errorf("len: %v != 7\n", ln) - } - - // fmt.Printf("empty: %v\n", bs.String()) - var ex, out string - ex = "[0 0 0 0 0 0 0 ]" - out = bs.String() - if out != ex { - t.Errorf("empty != %v", out) - } - - bs.Append(true) - ln = bs.Len() - if ln != 8 { - t.Errorf("len: %v != 8\n", ln) - } - // fmt.Printf("append true: %v\n", bs.String()) - ex = "[0 0 0 0 0 0 0 1 ]" - out = bs.String() - if out != ex { - t.Errorf("append true != %v", out) - } - - bs.Append(false) - ln = bs.Len() - if ln != 9 { - t.Errorf("len: %v != 9\n", ln) - } - // fmt.Printf("append false: %v\n", bs.String()) - ex = "[0 0 0 0 0 0 0 1 0 ]" - out = bs.String() - if out != ex { - t.Errorf("append false != %v", out) - } -} diff --git a/tensor/cmd/tablecat/tablecat.go b/tensor/cmd/tablecat/tablecat.go deleted file mode 100644 index 5c3273257f..0000000000 --- a/tensor/cmd/tablecat/tablecat.go +++ /dev/null @@ -1,165 
+0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "sort" - "strconv" - - "cogentcore.org/core/core" - "cogentcore.org/core/tensor/stats/split" - "cogentcore.org/core/tensor/stats/stats" - "cogentcore.org/core/tensor/table" -) - -var ( - Output string - Col string - OutFile *os.File - OutWriter *bufio.Writer - LF = []byte("\n") - Delete bool - LogPrec = 4 -) - -func main() { - var help bool - var avg bool - var colavg bool - flag.BoolVar(&help, "help", false, "if true, report usage info") - flag.BoolVar(&avg, "avg", false, "if true, files must have same cols (ideally rows too, though not necessary), outputs average of any float-type columns across files") - flag.BoolVar(&colavg, "colavg", false, "if true, outputs average of any float-type columns aggregated by column") - flag.StringVar(&Col, "col", "", "name of column for colavg") - flag.StringVar(&Output, "output", "", "name of output file -- stdout if not specified") - flag.StringVar(&Output, "o", "", "name of output file -- stdout if not specified") - flag.BoolVar(&Delete, "delete", false, "if true, delete the source files after cat -- careful!") - flag.BoolVar(&Delete, "d", false, "if true, delete the source files after cat -- careful!") - flag.IntVar(&LogPrec, "prec", 4, "precision for number output -- defaults to 4") - flag.Parse() - - files := flag.Args() - - sort.StringSlice(files).Sort() - - if Output != "" { - OutFile, err := os.Create(Output) - if err != nil { - fmt.Println("Error creating output file:", err) - os.Exit(1) - } - defer OutFile.Close() - OutWriter = bufio.NewWriter(OutFile) - } else { - OutWriter = bufio.NewWriter(os.Stdout) - } - - switch { - case help || len(files) == 0: - fmt.Printf("\netcat is a data table concatenation utility\n\tassumes all files have header lines, and only retains the header for the first file\n\t(otherwise just use regular cat)\n") - flag.PrintDefaults() - case colavg: - AvgByColumn(files, Col) - case avg: - AvgCat(files) - default: - RawCat(files) - } - OutWriter.Flush() -} - -// RawCat concatenates all data in one big file -func RawCat(files []string) { - for fi, fn := range files { - fp, err := os.Open(fn) - if err != nil { - fmt.Println("Error opening file: ", err) - continue - } - scan := bufio.NewScanner(fp) - li := 0 - for { - if !scan.Scan() { - break - } - ln := scan.Bytes() - if li == 0 { - if fi == 0 { - OutWriter.Write(ln) - OutWriter.Write(LF) - } - } else { - OutWriter.Write(ln) - OutWriter.Write(LF) - } - li++ - } - fp.Close() - if Delete { - os.Remove(fn) - } - } -} - -// AvgCat computes average across all runs -func AvgCat(files []string) { - dts := make([]*table.Table, 0, len(files)) - for _, fn := range files { - dt := &table.Table{} - err := dt.OpenCSV(core.Filename(fn), table.Tab) - if err != nil { - fmt.Println("Error opening file: ", err) - continue - } - if dt.Rows == 0 { - fmt.Printf("File %v empty\n", fn) - continue - } - dts = append(dts, dt) - } - if len(dts) == 0 { - fmt.Println("No files or files are empty, exiting") - return - } - avgdt := stats.MeanTables(dts) - avgdt.SetMetaData("precision", strconv.Itoa(LogPrec)) - avgdt.SaveCSV(core.Filename(Output), table.Tab, table.Headers) -} - -// AvgByColumn computes average by given column for given files -// If column is empty, averages across all rows. 
-func AvgByColumn(files []string, column string) { - for _, fn := range files { - dt := table.NewTable() - err := dt.OpenCSV(core.Filename(fn), table.Tab) - if err != nil { - fmt.Println("Error opening file: ", err) - continue - } - if dt.Rows == 0 { - fmt.Printf("File %v empty\n", fn) - continue - } - ix := table.NewIndexView(dt) - var spl *table.Splits - if column == "" { - spl = split.All(ix) - } else { - spl = split.GroupBy(ix, column) - } - for ci, cl := range dt.Columns { - if cl.IsString() || dt.ColumnNames[ci] == column { - continue - } - split.AggIndex(spl, ci, stats.Mean) - } - avgdt := spl.AggsToTable(table.ColumnNameOnly) - avgdt.SetMetaData("precision", strconv.Itoa(LogPrec)) - avgdt.SaveCSV(core.Filename(Output), table.Tab, table.Headers) - } -} diff --git a/tensor/cmd/ttail/README.md b/tensor/cmd/ttail/README.md deleted file mode 100644 index d1852ac511..0000000000 --- a/tensor/cmd/ttail/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# ttail: cli app to display, monitor tabular data files - -`ttail` is a `tail`-like command for displaing tabular data in a cli / terminal window, typically .csv / .tsv log / data tabular files. - -# Install - -This should install into your `$GOPATH/bin` dir: - -```bash -$ go install cogentcore.org/core/tensor/cmd/ttail@latest -``` - -# Run - -Just pass files as args, e.g., on the test files in this dir: - -```bash -$ ttail RA25* -``` - -# Keys - -This is shown when you press `h` in the app: - - -| Key(s) | Function | -| ------- | ------------------------------------------------------ | -| spc,n | page down | -| p | page up | -| f | scroll right-hand panel to the right | -| b | scroll right-hand panel to the left | -| w | widen the left-hand panel of columns | -| s | shrink the left-hand panel of columns | -| t | toggle tail-mode (auto updating as file grows) on/off | -| a | jump to top | -| e | jump to end | -| v | rotate down through the list of files (if not all displayed) | -| u | rotate up through the list of files (if not all displayed) | -| m | more minimum lines per file -- increase amount shown of each file | -| l | less minimum lines per file -- decrease amount shown of each file | -| d | toggle display of file names | -| c | toggle display of column numbers instead of names | -| q | quit | - -# History - -The original version was `pdptail`, written in perl, named for the PDP++ software, developed from 2004-2018. Then there was `etail` written in Go, for the emergent system: https://github.com/emer. - - diff --git a/tensor/cmd/ttail/file.go b/tensor/cmd/ttail/file.go deleted file mode 100644 index 2bbe7e19a1..0000000000 --- a/tensor/cmd/ttail/file.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import ( - "bufio" - "os" - "strings" - "time" -) - -// File represents one opened file -- all data is read in and maintained here -type File struct { - - // file name (either in same dir or include path) - FName string `desc:"file name (either in same dir or include path)"` - - // mod time of file when last read - ModTime time.Time `desc:"mod time of file when last read"` - - // delim is commas, not tabs - Commas bool `desc:"delim is commas, not tabs"` - - // rows of data == len(Data) - Rows int `desc:"rows of data == len(Data)"` - - // width of each column: resized to fit widest element - Widths []int `desc:"width of each column: resized to fit widest element"` - - // headers - Heads []string `desc:"headers"` - - // data -- rows 1..end - Data [][]string `desc:"data -- rows 1..end"` -} - -// Files is a slice of open files -type Files []*File - -// TheFiles are the set of open files -var TheFiles Files - -// Open opens file, reads it -func (fl *File) Open(fname string) error { - fl.FName = fname - return fl.Read() -} - -// CheckUpdate checks if file has been modified -- returns true if so -func (fl *File) CheckUpdate() bool { - st, err := os.Stat(fl.FName) - if err != nil { - return false - } - return st.ModTime().After(fl.ModTime) -} - -// Read reads data from file -func (fl *File) Read() error { - st, err := os.Stat(fl.FName) - if err != nil { - return err - } - fl.ModTime = st.ModTime() - f, err := os.Open(fl.FName) - if err != nil { - return err - } - defer f.Close() - - if fl.Data != nil { - fl.Data = fl.Data[:0] - } - - scan := bufio.NewScanner(f) - ln := 0 - for scan.Scan() { - s := string(scan.Bytes()) - var fd []string - if fl.Commas { - fd = strings.Split(s, ",") - } else { - fd = strings.Split(s, "\t") - } - if ln == 0 { - if len(fd) == 0 || strings.Count(s, ",") > strings.Count(s, "\t") { - fl.Commas = true - fd = strings.Split(s, ",") - } - fl.Heads = fd - fl.Widths = make([]int, len(fl.Heads)) - fl.FitWidths(fd) - ln++ - continue - } - fl.Data = append(fl.Data, fd) - fl.FitWidths(fd) - ln++ - } - fl.Rows = ln - 1 // skip header - return err -} - -// FitWidths expands widths given current set of fields -func (fl *File) FitWidths(fd []string) { - nw := len(fl.Widths) - for i, f := range fd { - if i >= nw { - break - } - w := max(fl.Widths[i], len(f)) - fl.Widths[i] = w - } -} - -///////////////////////////////////////////////////////////////// -// Files - -// Open opens all files -func (fl *Files) Open(fnms []string) { - for _, fn := range fnms { - f := &File{} - err := f.Open(fn) - if err == nil { - *fl = append(*fl, f) - } - } -} - -// CheckUpdates check for any updated files, re-read if so -- returns true if so -func (fl *Files) CheckUpdates() bool { - got := false - for _, f := range *fl { - if f.CheckUpdate() { - f.Read() - got = true - } - } - return got -} diff --git a/tensor/cmd/ttail/help.go b/tensor/cmd/ttail/help.go deleted file mode 100644 index 101d481357..0000000000 --- a/tensor/cmd/ttail/help.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import "github.com/nsf/termbox-go" - -func (tm *Term) Help() { - termbox.Clear(termbox.ColorDefault, termbox.ColorDefault) - ln := 0 - tm.DrawStringDef(0, ln, "Key(s) Function") - ln++ - tm.DrawStringDef(0, ln, "--------------------------------------------------------------") - ln++ - tm.DrawStringDef(0, ln, "spc,n page down") - ln++ - tm.DrawStringDef(0, ln, "p page up") - ln++ - tm.DrawStringDef(0, ln, "f scroll right-hand panel to the right") - ln++ - tm.DrawStringDef(0, ln, "b scroll right-hand panel to the left") - ln++ - tm.DrawStringDef(0, ln, "w widen the left-hand panel of columns") - ln++ - tm.DrawStringDef(0, ln, "s shrink the left-hand panel of columns") - ln++ - tm.DrawStringDef(0, ln, "t toggle tail-mode (auto updating as file grows) on/off") - ln++ - tm.DrawStringDef(0, ln, "a jump to top") - ln++ - tm.DrawStringDef(0, ln, "e jump to end") - ln++ - tm.DrawStringDef(0, ln, "v rotate down through the list of files (if not all displayed)") - ln++ - tm.DrawStringDef(0, ln, "u rotate up through the list of files (if not all displayed)") - ln++ - tm.DrawStringDef(0, ln, "m more minimum lines per file -- increase amount shown of each file") - ln++ - tm.DrawStringDef(0, ln, "l less minimum lines per file -- decrease amount shown of each file") - ln++ - tm.DrawStringDef(0, ln, "d toggle display of file names") - ln++ - tm.DrawStringDef(0, ln, "c toggle display of column numbers instead of names") - ln++ - tm.DrawStringDef(0, ln, "q quit") - ln++ - termbox.Flush() -} diff --git a/tensor/cmd/ttail/term.go b/tensor/cmd/ttail/term.go deleted file mode 100644 index bb9e3f92d9..0000000000 --- a/tensor/cmd/ttail/term.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "fmt" - "image" - "sync" - - termbox "github.com/nsf/termbox-go" -) - -// Term represents the terminal display -- has all drawing routines -// and all display data. See Tail for two diff display modes. 
-type Term struct { - - // size of terminal - Size image.Point `desc:"size of terminal"` - - // number of fixed (non-scrolling) columns on left - FixCols int `desc:"number of fixed (non-scrolling) columns on left"` - - // starting column index -- relative to FixCols - ColSt int `desc:"starting column index -- relative to FixCols"` - - // starting row index -- for !Tail mode - RowSt int `desc:"starting row index -- for !Tail mode"` - - // row from end -- for Tail mode - RowFromEnd int `desc:"row from end (relative to RowsPer) -- for Tail mode"` - - // starting index into files (if too many to display) - FileSt int `desc:"starting index into files (if too many to display)"` - - // number of files to display (if too many to display) - NFiles int `desc:"number of files to display (if too many to display)"` - - // minimum number of lines per file - MinLines int `desc:"minimum number of lines per file"` - - // maximum column width (1/4 of term width) - MaxWd int `desc:"maximum column width (1/4 of term width)"` - - // max number of rows across all files - MaxRows int `desc:"max number of rows across all files"` - - // number of Y rows per file total: Size.Y / len(TheFiles) - YPer int `desc:"number of Y rows per file total: Size.Y / len(TheFiles)"` - - // rows of data per file (subtracting header, filename) - RowsPer int `desc:"rows of data per file (subtracting header, filename)"` - - // if true, print filename - ShowFName bool `desc:"if true, print filename"` - - // if true, display is synchronized by the last row for each file, and otherwise it is synchronized by the starting row. Tail also checks for file updates - Tail bool `desc:"if true, display is synchronized by the last row for each file, and otherwise it is synchronized by the starting row. Tail also checks for file updates"` - - // display column numbers instead of names - ColNums bool `desc:"display column numbers instead of names"` - - // draw mutex - Mu sync.Mutex `desc:"draw mutex"` -} - -// TheTerm is the terminal instance -var TheTerm Term - -// Draw draws the current terminal display -func (tm *Term) Draw() error { - tm.Mu.Lock() - defer tm.Mu.Unlock() - - err := termbox.Clear(termbox.ColorDefault, termbox.ColorDefault) - if err != nil { - return err - } - - w, h := termbox.Size() - tm.Size.X = w - tm.Size.Y = h - tm.MaxWd = tm.Size.X / 4 - - if tm.MinLines == 0 { - tm.MinLines = min(5, tm.Size.Y-1) - } - - nf := len(TheFiles) - if nf == 0 { - return fmt.Errorf("No files") - } - ysz := tm.Size.Y - 1 // status line - tm.YPer = ysz / nf - tm.NFiles = nf - - if tm.YPer < tm.MinLines { - tm.NFiles = ysz / tm.MinLines - tm.YPer = tm.MinLines - } - if tm.NFiles+tm.FileSt > nf { - tm.FileSt = max(0, nf-tm.NFiles) - } - - tm.RowsPer = tm.YPer - 1 - if tm.ShowFName { - tm.RowsPer-- - } - sty := 0 - mxrows := 0 - for fi := 0; fi < tm.NFiles; fi++ { - ffi := tm.FileSt + fi - if ffi >= nf { - break - } - fl := TheFiles[ffi] - tm.DrawFile(fl, sty) - sty += tm.YPer - mxrows = max(mxrows, fl.Rows) - } - tm.MaxRows = mxrows - - tm.StatusLine() - - termbox.Flush() - return nil -} - -// StatusLine renders the status line at bottom -func (tm *Term) StatusLine() { - pos := tm.RowSt - if tm.Tail { - pos = tm.RowFromEnd - } - stat := fmt.Sprintf("Tail: %v\tPos: %d\tMaxRows: %d\tNFile: %d\tFileSt: %d\t h = help [spc,n,p,r,f,l,b,w,s,t,a,e,v,u,m,l,c,q] ", tm.Tail, pos, tm.MaxRows, len(TheFiles), tm.FileSt) - tm.DrawString(0, tm.Size.Y-1, stat, len(stat), termbox.AttrReverse, termbox.AttrReverse) -} - -// NextPage moves down a page -func (tm *Term) 
NextPage() error { - if tm.Tail { - mn := min(-(tm.MaxRows - tm.RowsPer), 0) - tm.RowFromEnd = min(tm.RowFromEnd+tm.RowsPer, 0) - tm.RowFromEnd = max(tm.RowFromEnd, mn) - } else { - tm.RowSt = min(tm.RowSt+tm.RowsPer, tm.MaxRows-tm.RowsPer) - tm.RowSt = max(tm.RowSt, 0) - } - return tm.Draw() -} - -// PrevPage moves up a page -func (tm *Term) PrevPage() error { - if tm.Tail { - mn := min(-(tm.MaxRows - tm.RowsPer), 0) - tm.RowFromEnd = min(tm.RowFromEnd-tm.RowsPer, 0) - tm.RowFromEnd = max(tm.RowFromEnd, mn) - } else { - tm.RowSt = max(tm.RowSt-tm.RowsPer, 0) - tm.RowSt = min(tm.RowSt, tm.MaxRows-tm.RowsPer) - } - return tm.Draw() -} - -// NextLine moves down a page -func (tm *Term) NextLine() error { - if tm.Tail { - mn := min(-(tm.MaxRows - tm.RowsPer), 0) - tm.RowFromEnd = min(tm.RowFromEnd+1, 0) - tm.RowFromEnd = max(tm.RowFromEnd, mn) - } else { - tm.RowSt = min(tm.RowSt+1, tm.MaxRows-tm.RowsPer) - tm.RowSt = max(tm.RowSt, 0) - } - return tm.Draw() -} - -// PrevLine moves up a page -func (tm *Term) PrevLine() error { - if tm.Tail { - mn := min(-(tm.MaxRows - tm.RowsPer), 0) - tm.RowFromEnd = min(tm.RowFromEnd-1, 0) - tm.RowFromEnd = max(tm.RowFromEnd, mn) - } else { - tm.RowSt = max(tm.RowSt-1, 0) - tm.RowSt = min(tm.RowSt, tm.MaxRows-tm.RowsPer) - } - return tm.Draw() -} - -// Top moves to starting row = 0 -func (tm *Term) Top() error { - mn := min(-(tm.MaxRows - tm.RowsPer), 0) - tm.RowFromEnd = mn - tm.RowSt = 0 - return tm.Draw() -} - -// End moves row start to last position in longest file -func (tm *Term) End() error { - mx := max(tm.MaxRows-tm.RowsPer, 0) - tm.RowFromEnd = 0 - tm.RowSt = mx - return tm.Draw() -} - -// ScrollRight scrolls columns to right -func (tm *Term) ScrollRight() error { - tm.ColSt++ // no obvious max - return tm.Draw() -} - -// ScrollLeft scrolls columns to left -func (tm *Term) ScrollLeft() error { - tm.ColSt = max(tm.ColSt-1, 0) - return tm.Draw() -} - -// FixRight increases number of fixed columns -func (tm *Term) FixRight() error { - tm.FixCols++ // no obvious max - return tm.Draw() -} - -// FixLeft decreases number of fixed columns -func (tm *Term) FixLeft() error { - tm.FixCols = max(tm.FixCols-1, 0) - return tm.Draw() -} - -// FilesNext moves down in list of files to display -func (tm *Term) FilesNext() error { - nf := len(TheFiles) - tm.FileSt = min(tm.FileSt+1, nf-tm.NFiles) - tm.FileSt = max(tm.FileSt, 0) - return tm.Draw() -} - -// FilesPrev moves up in list of files to display -func (tm *Term) FilesPrev() error { - nf := len(TheFiles) - tm.FileSt = max(tm.FileSt-1, 0) - tm.FileSt = min(tm.FileSt, nf-tm.NFiles) - return tm.Draw() -} - -// MoreMinLines increases minimum number of lines per file -func (tm *Term) MoreMinLines() error { - tm.MinLines++ - return tm.Draw() -} - -// LessMinLines decreases minimum number of lines per file -func (tm *Term) LessMinLines() error { - tm.MinLines-- - tm.MinLines = max(3, tm.MinLines) - return tm.Draw() -} - -// ToggleNames toggles whether file names are shown -func (tm *Term) ToggleNames() error { - tm.ShowFName = !tm.ShowFName - return tm.Draw() -} - -// ToggleTail toggles Tail mode -func (tm *Term) ToggleTail() error { - tm.Tail = !tm.Tail - return tm.Draw() -} - -// ToggleColNums toggles ColNums mode -func (tm *Term) ToggleColNums() error { - tm.ColNums = !tm.ColNums - return tm.Draw() -} - -// TailCheck does tail update check -- returns true if updated -func (tm *Term) TailCheck() bool { - if !tm.Tail { - return false - } - tm.Mu.Lock() - update := TheFiles.CheckUpdates() - tm.Mu.Unlock() - if !update { 
- return false - } - tm.Draw() - return true -} - -// DrawFile draws one file, starting at given y offset -func (tm *Term) DrawFile(fl *File, sty int) { - tdo := (fl.Rows - tm.RowsPer) + tm.RowFromEnd // tail data offset for this file - tdo = max(0, tdo) - rst := min(tm.RowSt, fl.Rows-tm.RowsPer) - rst = max(0, rst) - stx := 0 - for ci, hs := range fl.Heads { - if !(ci < tm.FixCols || ci >= tm.FixCols+tm.ColSt) { - continue - } - my := sty - if tm.ShowFName { - tm.DrawString(0, my, fl.FName, tm.Size.X, termbox.AttrReverse, termbox.AttrReverse) - my++ - } - wmax := min(fl.Widths[ci], tm.MaxWd) - if tm.ColNums { - hs = fmt.Sprintf("%d", ci) - } - tm.DrawString(stx, my, hs, wmax, termbox.AttrReverse, termbox.AttrReverse) - if ci == tm.FixCols-1 { - tm.DrawString(stx+wmax+1, my, "|", 1, termbox.AttrReverse, termbox.AttrReverse) - } - my++ - for ri := 0; ri < tm.RowsPer; ri++ { - var di int - if tm.Tail { - di = tdo + ri - } else { - di = rst + ri - } - if di >= len(fl.Data) || di < 0 { - continue - } - dr := fl.Data[di] - if ci >= len(dr) { - break - } - ds := dr[ci] - tm.DrawString(stx, my+ri, ds, wmax, termbox.ColorDefault, termbox.ColorDefault) - if ci == tm.FixCols-1 { - tm.DrawString(stx+wmax+1, my+ri, "|", 1, termbox.AttrReverse, termbox.AttrReverse) - } - } - stx += wmax + 1 - if ci == tm.FixCols-1 { - stx += 2 - } - if stx >= tm.Size.X { - break - } - } -} - -// DrawStringDef draws string at given position, using default colors -func (tm *Term) DrawStringDef(x, y int, s string) { - tm.DrawString(x, y, s, tm.Size.X, termbox.ColorDefault, termbox.ColorDefault) -} - -// DrawString draws string at given position, using given attributes -func (tm *Term) DrawString(x, y int, s string, maxlen int, fg, bg termbox.Attribute) { - if y >= tm.Size.Y || y < 0 { - return - } - for i, r := range s { - if i >= maxlen { - break - } - xp := x + i - if xp >= tm.Size.X || xp < 0 { - continue - } - termbox.SetCell(xp, y, r, fg, bg) - } -} diff --git a/tensor/cmd/ttail/testdata/RA25_Base_epc.csv b/tensor/cmd/ttail/testdata/RA25_Base_epc.csv deleted file mode 100644 index cb7d39f154..0000000000 --- a/tensor/cmd/ttail/testdata/RA25_Base_epc.csv +++ /dev/null @@ -1,83 +0,0 @@ -|Run |Epoch #SSE #AvgSSE #PctErr #PctCor #CosDiff #PerTrlMSec #Hidden1_ActAvg #Hidden2_ActAvg #Output_ActAvg -0 0 166.9 0.2782 1 0 -6.826e-05 0 0.172 0.1579 0.1927 -0 1 150.8 0.2513 1 0 0.09688 1.014 0.1711 0.1562 0.1967 -0 2 128.2 0.2136 1 0 0.2299 0.989 0.1704 0.1547 0.2003 -0 3 99.63 0.166 1 0 0.3907 0.9587 0.1697 0.1533 0.2034 -0 4 80.48 0.1341 1 0 0.505 0.9822 0.1695 0.1522 0.2062 -0 5 70.38 0.1173 1 0 0.5609 1.84 0.1679 0.1502 0.2109 -0 6 51.67 0.08612 1 0 0.649 0.9591 0.1672 0.149 0.2128 -0 7 48.61 0.08102 1 0 0.6861 1.007 0.1669 0.1483 0.2146 -0 8 35.82 0.0597 1 0 0.7498 0.9346 0.1665 0.1476 0.2161 -0 9 30.3 0.0505 1 0 0.7844 1.172 0.166 0.1466 0.2174 -0 10 24.64 0.04106 0.9583 0.04167 0.8149 1.758 0.165 0.1455 0.2197 -0 11 22.18 0.03696 0.9167 0.08333 0.8343 0.9923 0.1637 0.1446 0.2206 -0 12 19.16 0.03194 0.9167 0.08333 0.8486 0.9288 0.1621 0.1436 0.2215 -0 13 15.54 0.02589 0.7917 0.2083 0.874 0.8765 0.1607 0.1427 0.2222 -0 14 8.525 0.01421 0.6667 0.3333 0.9099 0.9615 0.1595 0.1419 0.2229 -0 15 5.144 0.008574 0.5417 0.4583 0.9273 1.866 0.1572 0.1405 0.224 -0 16 5.41 0.009016 0.625 0.375 0.9353 0.9055 0.1568 0.1402 0.2244 -0 17 3.999 0.006665 0.3333 0.6667 0.9392 0.9423 0.1557 0.1397 0.2248 -0 18 3.296 0.005493 0.3333 0.6667 0.947 0.951 0.1549 0.1391 0.2252 -0 19 2.465 0.004108 0.3333 0.6667 0.9579 0.9095 0.1542 0.1386 0.2255 -0 
20 1.558 0.002597 0.125 0.875 0.9621 1.749 0.1531 0.1382 0.226 -0 21 2.863 0.004772 0.2917 0.7083 0.9514 0.941 0.1525 0.1387 0.2263 -0 22 2.785 0.004641 0.2917 0.7083 0.9544 0.9404 0.1521 0.1391 0.2265 -0 23 2.121 0.003535 0.2083 0.7917 0.9608 0.9664 0.1516 0.1395 0.2266 -0 24 0.725 0.001208 0.08333 0.9167 0.9686 1.012 0.1515 0.1399 0.2268 -0 25 0.6263 0.001044 0.08333 0.9167 0.9717 1.785 0.1504 0.1402 0.2271 -0 26 0.6039 0.001006 0.08333 0.9167 0.9747 0.9018 0.1502 0.1404 0.2272 -0 27 0.7179 0.001196 0.08333 0.9167 0.979 0.9236 0.15 0.1408 0.2273 -0 28 1.913 0.003189 0.125 0.875 0.9691 0.9136 0.1498 0.1412 0.2273 -0 29 1.123 0.001872 0.08333 0.9167 0.9767 0.9405 0.1496 0.1415 0.2274 -0 30 0.3265 0.0005441 0.04167 0.9583 0.9821 1.818 0.1491 0.1419 0.2275 -0 31 0.534 0.00089 0.08333 0.9167 0.9818 0.9664 0.1489 0.1419 0.2276 -0 32 0 0 0 1 0.987 0.9613 0.1491 0.1424 0.2276 -0 33 0.5611 0.0009352 0.04167 0.9583 0.9847 0.9222 0.149 0.1424 0.2277 -0 34 0 0 0 1 0.9883 0.8654 0.149 0.1427 0.2277 -0 35 0 0 0 1 0.9905 1.788 0.1485 0.1427 0.2278 -0 36 0 0 0 1 0.9918 1.166 0.1482 0.1428 0.2278 -0 37 0 0 0 1 0.9917 1.08 0.1483 0.1429 0.2278 -0 38 0 0 0 1 0.9942 0.9023 0.1482 0.1429 0.2278 -1 0 184.2 0.307 1 0 -0.0007995 1.052 0.1473 0.1438 0.1931 -1 1 159.5 0.2659 1 0 0.08436 1.276 0.1491 0.1447 0.197 -1 2 130.9 0.2181 1 0 0.224 0.8997 0.151 0.1454 0.2006 -1 3 105.8 0.1763 1 0 0.3543 0.8926 0.1524 0.1458 0.2037 -1 4 94.86 0.1581 1 0 0.4107 0.7913 0.1536 0.1458 0.2064 -1 5 70.86 0.1181 1 0 0.5544 1.62 0.1559 0.1447 0.2111 -1 6 56.41 0.09402 1 0 0.6386 0.8755 0.1568 0.1443 0.213 -1 7 50.94 0.0849 1 0 0.665 0.7786 0.1576 0.1437 0.2147 -1 8 36.91 0.06152 1 0 0.7437 0.8465 0.1581 0.1433 0.2162 -1 9 31.66 0.05276 0.9167 0.08333 0.7763 0.8259 0.1587 0.1431 0.2175 -1 10 22.35 0.03725 0.9167 0.08333 0.8245 1.542 0.1599 0.1423 0.2198 -1 11 21.45 0.03574 0.875 0.125 0.832 0.7645 0.1588 0.1415 0.2207 -1 12 18.21 0.03036 0.7083 0.2917 0.8523 0.7664 0.1576 0.1406 0.2215 -1 13 8.76 0.0146 0.6667 0.3333 0.9095 0.8084 0.1567 0.1398 0.2223 -1 14 8.257 0.01376 0.5833 0.4167 0.9089 0.8231 0.1562 0.1393 0.2229 -1 15 9.211 0.01535 0.5417 0.4583 0.9082 1.565 0.1546 0.138 0.224 -1 16 5.019 0.008365 0.375 0.625 0.9365 0.8142 0.1538 0.1374 0.2245 -1 17 4.707 0.007845 0.375 0.625 0.9412 0.9483 0.1534 0.137 0.2249 -1 18 4.218 0.007031 0.375 0.625 0.9409 0.8317 0.1527 0.1369 0.2252 -1 19 6.631 0.01105 0.4583 0.5417 0.9289 0.8591 0.1524 0.1375 0.2255 -1 20 6.045 0.01008 0.375 0.625 0.9316 1.517 0.1518 0.1384 0.2261 -1 21 2.488 0.004147 0.2083 0.7917 0.9559 0.8073 0.1513 0.1386 0.2263 -1 22 2.38 0.003966 0.2083 0.7917 0.9605 0.7898 0.151 0.1389 0.2265 -1 23 2.126 0.003543 0.25 0.75 0.9617 0.8156 0.1506 0.139 0.2267 -1 24 1.41 0.00235 0.125 0.875 0.9669 0.7917 0.1505 0.1393 0.2268 -1 25 1.395 0.002325 0.125 0.875 0.9704 1.78 0.1497 0.1394 0.2271 -1 26 2.934 0.00489 0.08333 0.9167 0.9617 0.8272 0.1496 0.1396 0.2272 -1 27 0.7005 0.001167 0.08333 0.9167 0.9776 0.7539 0.1493 0.1397 0.2273 -1 28 0.2999 0.0004998 0.04167 0.9583 0.9795 0.8755 0.1493 0.1398 0.2273 -1 29 0.2931 0.0004886 0.04167 0.9583 0.9816 0.7872 0.1494 0.1401 0.2274 -1 30 0.2556 0.000426 0.04167 0.9583 0.9838 1.507 0.1489 0.1403 0.2275 -1 31 0.2503 0.0004172 0.04167 0.9583 0.9855 0.8297 0.1486 0.1404 0.2276 -1 32 0 0 0 1 0.9881 0.801 0.1485 0.1403 0.2276 -1 33 0 0 0 1 0.9887 1.015 0.1482 0.1404 0.2277 -1 34 0 0 0 1 0.9893 0.8483 0.1481 0.1402 0.2277 -1 35 0 0 0 1 0.989 1.644 0.1481 0.1404 0.2278 -1 36 0.3285 0.0005475 0.04167 0.9583 0.9903 0.822 0.1482 0.1403 0.2278 -1 37 
0.2702 0.0004504 0.04167 0.9583 0.9908 0.7749 0.148 0.1404 0.2278 -1 38 0 0 0 1 0.9916 0.804 0.1479 0.1404 0.2278 -1 39 0 0 0 1 0.9918 0.7646 0.1479 0.1405 0.2279 -1 40 0 0 0 1 0.9915 1.482 0.1477 0.1403 0.2279 -1 41 0 0 0 1 0.9927 0.798 0.1476 0.1402 0.2279 -1 42 0 0 0 1 0.993 0.789 0.1474 0.1402 0.2279 diff --git a/tensor/cmd/ttail/testdata/RA25_Base_run.csv b/tensor/cmd/ttail/testdata/RA25_Base_run.csv deleted file mode 100644 index c91031fb70..0000000000 --- a/tensor/cmd/ttail/testdata/RA25_Base_run.csv +++ /dev/null @@ -1,3 +0,0 @@ -|Run $Params #FirstZero #SSE #AvgSSE #PctErr #PctCor #CosDiff -0 Base 32 0 0 0 1 0.9913 -1 Base 32 0 0 0 1 0.9921 diff --git a/tensor/cmd/ttail/testdata/RA25_Base_trl.csv b/tensor/cmd/ttail/testdata/RA25_Base_trl.csv deleted file mode 100644 index c8edf640bd..0000000000 --- a/tensor/cmd/ttail/testdata/RA25_Base_trl.csv +++ /dev/null @@ -1,1969 +0,0 @@ -|Run |Epoch |Trial $TrialName #Err #SSE #AvgSSE #CosDiff -0 0 0 evt_23 1 4.987 0.1995 0.3507 -0 0 1 evt_16 1 4.404 0.1762 0.4012 -0 0 2 evt_19 1 8.244 0.3297 -0.1512 -0 0 3 evt_5 1 6.687 0.2675 0.01344 -0 0 4 evt_4 1 5.764 0.2305 0.1708 -0 0 5 evt_20 1 6.068 0.2427 0.06015 -0 0 6 evt_3 1 5.477 0.2191 0.169 -0 0 7 evt_22 1 6.643 0.2657 -0.04358 -0 0 8 evt_17 1 6.625 0.265 0.05092 -0 0 9 evt_8 1 8.153 0.3261 -0.191 -0 0 10 evt_2 1 6.614 0.2646 -0.0324 -0 0 11 evt_12 1 8.461 0.3385 -0.1213 -0 0 12 evt_15 1 5.418 0.2167 0.2394 -0 0 13 evt_10 1 7.417 0.2967 -0.05132 -0 0 14 evt_21 1 7.87 0.3148 -0.225 -0 0 15 evt_11 1 6.572 0.2629 0.01696 -0 0 16 evt_7 1 6.201 0.248 -0.05718 -0 0 17 evt_9 1 10.41 0.4166 -0.385 -0 0 18 evt_1 1 3.207 0.1283 0.4721 -0 0 19 evt_0 1 8.436 0.3375 -0.1868 -0 0 20 evt_18 1 8.852 0.3541 -0.1523 -0 0 21 evt_13 1 7.662 0.3065 -0.03814 -0 0 22 evt_6 1 7.588 0.3035 -0.1115 -0 0 23 evt_14 1 9.174 0.367 -0.1995 -0 1 0 evt_19 1 7.755 0.3102 -0.1418 -0 1 1 evt_7 1 5.527 0.2211 0.0889 -0 1 2 evt_1 1 3.485 0.1394 0.4461 -0 1 3 evt_18 1 7.225 0.289 -0.1589 -0 1 4 evt_4 1 4.766 0.1906 0.246 -0 1 5 evt_23 1 5.063 0.2025 0.2807 -0 1 6 evt_16 1 4.595 0.1838 0.3403 -0 1 7 evt_12 1 8.504 0.3402 -0.1892 -0 1 8 evt_3 1 6.178 0.2471 0.07102 -0 1 9 evt_11 1 7.176 0.287 0.006864 -0 1 10 evt_21 1 4.999 0.2 0.1859 -0 1 11 evt_15 1 2.265 0.09061 0.6244 -0 1 12 evt_13 1 8.72 0.3488 -0.1602 -0 1 13 evt_2 1 7.099 0.2839 -0.02414 -0 1 14 evt_14 1 9.081 0.3632 -0.1876 -0 1 15 evt_20 1 5.58 0.2232 0.2259 -0 1 16 evt_10 1 5.887 0.2355 0.1911 -0 1 17 evt_22 1 8.016 0.3206 -0.0956 -0 1 18 evt_8 1 3.692 0.1477 0.4185 -0 1 19 evt_9 1 10.08 0.4033 -0.3464 -0 1 20 evt_6 1 5.567 0.2227 0.2184 -0 1 21 evt_0 1 6.596 0.2638 0.1015 -0 1 22 evt_5 1 6.51 0.2604 0.06273 -0 1 23 evt_17 1 6.386 0.2554 0.1206 -0 2 0 evt_6 1 5.546 0.2218 0.2319 -0 2 1 evt_10 1 4.844 0.1937 0.2885 -0 2 2 evt_21 1 5.449 0.218 0.2001 -0 2 3 evt_19 1 6.771 0.2708 0.05133 -0 2 4 evt_15 1 1.84 0.0736 0.663 -0 2 5 evt_22 1 4.309 0.1724 0.324 -0 2 6 evt_18 1 5.539 0.2216 0.2085 -0 2 7 evt_8 1 6.994 0.2797 0.02964 -0 2 8 evt_20 1 6.622 0.2649 0.06915 -0 2 9 evt_23 1 4.386 0.1754 0.3186 -0 2 10 evt_11 1 7.192 0.2877 -0.02623 -0 2 11 evt_7 1 7.234 0.2894 0.01685 -0 2 12 evt_16 1 4.104 0.1642 0.355 -0 2 13 evt_2 1 6.876 0.2751 0.008707 -0 2 14 evt_5 1 3.141 0.1256 0.5656 -0 2 15 evt_4 1 6.188 0.2475 0.1321 -0 2 16 evt_1 1 3.661 0.1464 0.4574 -0 2 17 evt_17 1 4.167 0.1667 0.4192 -0 2 18 evt_9 1 7.153 0.2861 0.017 -0 2 19 evt_0 1 4.573 0.1829 0.3173 -0 2 20 evt_3 1 5.523 0.2209 0.1814 -0 2 21 evt_13 1 6.35 0.254 0.09671 -0 2 22 evt_12 1 5.525 0.221 0.215 -0 2 23 
evt_14 1 4.181 0.1672 0.3762 -0 3 0 evt_17 1 2.875 0.115 0.6042 -0 3 1 evt_12 1 2.785 0.1114 0.5745 -0 3 2 evt_11 1 6.41 0.2564 0.1317 -0 3 3 evt_8 1 2.365 0.09459 0.6512 -0 3 4 evt_18 1 4.376 0.175 0.3832 -0 3 5 evt_15 1 2.305 0.0922 0.6653 -0 3 6 evt_19 1 5.797 0.2319 0.1566 -0 3 7 evt_23 1 1.926 0.07706 0.6961 -0 3 8 evt_3 1 4.885 0.1954 0.2955 -0 3 9 evt_7 1 4.441 0.1777 0.3118 -0 3 10 evt_10 1 4.175 0.167 0.3236 -0 3 11 evt_2 1 5.956 0.2382 0.09958 -0 3 12 evt_1 1 3.126 0.125 0.5257 -0 3 13 evt_13 1 6.02 0.2408 0.006573 -0 3 14 evt_4 1 7.919 0.3167 -0.02661 -0 3 15 evt_5 1 2.571 0.1028 0.607 -0 3 16 evt_14 1 2.185 0.08742 0.651 -0 3 17 evt_9 1 4.291 0.1716 0.3929 -0 3 18 evt_6 1 3.822 0.1529 0.4384 -0 3 19 evt_0 1 4.118 0.1647 0.4037 -0 3 20 evt_20 1 2.844 0.1138 0.5502 -0 3 21 evt_22 1 4.38 0.1752 0.3756 -0 3 22 evt_16 1 4.623 0.1849 0.2926 -0 3 23 evt_21 1 5.433 0.2173 0.2655 -0 4 0 evt_12 1 1.158 0.04631 0.794 -0 4 1 evt_15 1 1.064 0.04258 0.8292 -0 4 2 evt_0 1 3.675 0.147 0.4349 -0 4 3 evt_16 1 4.639 0.1856 0.2735 -0 4 4 evt_10 1 3.786 0.1514 0.4992 -0 4 5 evt_1 1 0.8369 0.03348 0.8278 -0 4 6 evt_11 1 5.656 0.2262 0.1955 -0 4 7 evt_23 1 2.149 0.08596 0.7007 -0 4 8 evt_14 1 0.9677 0.03871 0.8428 -0 4 9 evt_6 1 4.457 0.1783 0.3672 -0 4 10 evt_21 1 4.95 0.198 0.3177 -0 4 11 evt_5 1 1.337 0.0535 0.7485 -0 4 12 evt_19 1 6.188 0.2475 0.1532 -0 4 13 evt_17 1 3.39 0.1356 0.5573 -0 4 14 evt_3 1 3.881 0.1553 0.357 -0 4 15 evt_9 1 3.644 0.1458 0.4914 -0 4 16 evt_4 1 2.471 0.09885 0.6142 -0 4 17 evt_18 1 4.006 0.1602 0.4238 -0 4 18 evt_8 1 0.3511 0.01404 0.8489 -0 4 19 evt_13 1 5.599 0.224 0.2402 -0 4 20 evt_2 1 5.204 0.2082 0.2427 -0 4 21 evt_20 1 3.412 0.1365 0.4941 -0 4 22 evt_22 1 4.26 0.1704 0.4044 -0 4 23 evt_7 1 3.392 0.1357 0.4619 -0 5 0 evt_20 1 3.292 0.1317 0.515 -0 5 1 evt_22 1 5.973 0.2389 0.2058 -0 5 2 evt_19 1 5.825 0.233 0.23 -0 5 3 evt_3 1 3.557 0.1423 0.4045 -0 5 4 evt_21 1 3.907 0.1563 0.3907 -0 5 5 evt_11 1 3.066 0.1226 0.5107 -0 5 6 evt_7 1 2.531 0.1012 0.5871 -0 5 7 evt_6 1 4.466 0.1786 0.3196 -0 5 8 evt_5 1 1.626 0.06505 0.7379 -0 5 9 evt_14 1 0.6104 0.02442 0.9113 -0 5 10 evt_15 1 0.9404 0.03761 0.8697 -0 5 11 evt_0 1 5.178 0.2071 0.266 -0 5 12 evt_1 1 0.811 0.03244 0.8456 -0 5 13 evt_18 1 2.711 0.1084 0.5587 -0 5 14 evt_17 1 1.325 0.053 0.8125 -0 5 15 evt_13 1 1.309 0.05236 0.7749 -0 5 16 evt_16 1 2.597 0.1039 0.555 -0 5 17 evt_23 1 1.821 0.07285 0.7483 -0 5 18 evt_2 1 6.017 0.2407 0.1827 -0 5 19 evt_10 1 4 0.16 0.442 -0 5 20 evt_8 1 0.3694 0.01477 0.8734 -0 5 21 evt_4 1 5.303 0.2121 0.244 -0 5 22 evt_12 1 0.5682 0.02273 0.8524 -0 5 23 evt_9 1 2.581 0.1032 0.6234 -0 6 0 evt_7 1 2.418 0.0967 0.6271 -0 6 1 evt_0 1 3.275 0.131 0.52 -0 6 2 evt_12 1 0.2747 0.01099 0.8746 -0 6 3 evt_3 1 3.416 0.1366 0.5063 -0 6 4 evt_14 1 0.5987 0.02395 0.9133 -0 6 5 evt_17 1 0.581 0.02324 0.8919 -0 6 6 evt_6 1 4.054 0.1622 0.3584 -0 6 7 evt_19 1 4.86 0.1944 0.2911 -0 6 8 evt_20 1 1.509 0.06036 0.7581 -0 6 9 evt_8 1 0.7853 0.03141 0.8895 -0 6 10 evt_2 1 4.59 0.1836 0.289 -0 6 11 evt_10 1 4.06 0.1624 0.3636 -0 6 12 evt_4 1 2.544 0.1018 0.6163 -0 6 13 evt_13 1 0.5964 0.02386 0.8492 -0 6 14 evt_22 1 4.588 0.1835 0.2939 -0 6 15 evt_16 1 2.746 0.1099 0.5495 -0 6 16 evt_1 1 0.4436 0.01775 0.8597 -0 6 17 evt_9 1 2.24 0.08961 0.6306 -0 6 18 evt_18 1 1.333 0.05331 0.6394 -0 6 19 evt_23 1 0.528 0.02112 0.8388 -0 6 20 evt_11 1 1.722 0.06887 0.7235 -0 6 21 evt_21 1 2.61 0.1044 0.5857 -0 6 22 evt_15 1 0.9314 0.03726 0.8764 -0 6 23 evt_5 1 0.9664 0.03866 0.8302 -0 7 0 evt_12 1 0.7732 0.03093 0.8712 -0 7 1 
evt_7 1 2.507 0.1003 0.6157 -0 7 2 evt_14 1 0.5797 0.02319 0.9118 -0 7 3 evt_10 1 3.956 0.1583 0.3805 -0 7 4 evt_16 1 2.892 0.1157 0.5806 -0 7 5 evt_19 1 5.578 0.2231 0.1872 -0 7 6 evt_15 1 0.8329 0.03332 0.8904 -0 7 7 evt_0 1 1.357 0.05429 0.7603 -0 7 8 evt_9 1 2.682 0.1073 0.6151 -0 7 9 evt_13 1 0.4962 0.01985 0.8541 -0 7 10 evt_2 1 3.495 0.1398 0.4701 -0 7 11 evt_4 1 2.512 0.1005 0.6316 -0 7 12 evt_17 1 0.5907 0.02363 0.8984 -0 7 13 evt_23 1 0.82 0.0328 0.8308 -0 7 14 evt_21 1 2.227 0.08909 0.7007 -0 7 15 evt_6 1 2.269 0.09078 0.6405 -0 7 16 evt_8 1 0.5345 0.02138 0.8857 -0 7 17 evt_3 1 2.567 0.1027 0.6208 -0 7 18 evt_22 1 4.266 0.1706 0.3507 -0 7 19 evt_18 1 3.492 0.1397 0.4833 -0 7 20 evt_5 1 0.8604 0.03442 0.8552 -0 7 21 evt_20 1 1.064 0.04256 0.8041 -0 7 22 evt_1 1 0.8763 0.03505 0.8716 -0 7 23 evt_11 1 1.387 0.05548 0.7556 -0 8 0 evt_2 1 0.7534 0.03014 0.7429 -0 8 1 evt_5 1 0.5287 0.02115 0.8732 -0 8 2 evt_15 1 0.7478 0.02991 0.8971 -0 8 3 evt_18 1 1.722 0.06889 0.6827 -0 8 4 evt_20 1 1.026 0.04105 0.8082 -0 8 5 evt_23 1 0.5428 0.02171 0.8328 -0 8 6 evt_13 1 0.652 0.02608 0.866 -0 8 7 evt_12 1 1.396 0.05582 0.7941 -0 8 8 evt_17 1 0.6077 0.02431 0.9101 -0 8 9 evt_21 1 2.154 0.08617 0.718 -0 8 10 evt_22 1 2.264 0.09056 0.6072 -0 8 11 evt_0 1 1.16 0.04638 0.7763 -0 8 12 evt_7 1 1.95 0.078 0.6917 -0 8 13 evt_9 1 3.398 0.1359 0.5024 -0 8 14 evt_6 1 1.742 0.06968 0.7442 -0 8 15 evt_1 1 0.7059 0.02824 0.8908 -0 8 16 evt_16 1 2.202 0.08808 0.6387 -0 8 17 evt_10 1 2.867 0.1147 0.5346 -0 8 18 evt_14 1 0.5605 0.02242 0.907 -0 8 19 evt_3 1 1.07 0.0428 0.7821 -0 8 20 evt_4 1 2.664 0.1066 0.643 -0 8 21 evt_8 1 0.4217 0.01687 0.9244 -0 8 22 evt_11 1 1.012 0.04049 0.7998 -0 8 23 evt_19 1 3.673 0.1469 0.4283 -0 9 0 evt_10 1 2.686 0.1074 0.5688 -0 9 1 evt_18 1 1.514 0.06057 0.7328 -0 9 2 evt_22 1 2.19 0.08759 0.6529 -0 9 3 evt_19 1 2.767 0.1107 0.5632 -0 9 4 evt_14 1 0.5463 0.02185 0.8977 -0 9 5 evt_0 1 1.131 0.04523 0.8227 -0 9 6 evt_2 1 0.7226 0.0289 0.8228 -0 9 7 evt_8 1 0.3987 0.01595 0.916 -0 9 8 evt_5 1 0.5388 0.02155 0.904 -0 9 9 evt_3 1 0.9747 0.03899 0.808 -0 9 10 evt_12 1 0.348 0.01392 0.8712 -0 9 11 evt_17 1 0.6213 0.02485 0.9085 -0 9 12 evt_16 1 1.671 0.06682 0.6922 -0 9 13 evt_1 1 0.747 0.02988 0.8396 -0 9 14 evt_15 1 0.4076 0.0163 0.9199 -0 9 15 evt_4 1 2.488 0.09951 0.6658 -0 9 16 evt_23 1 2.037 0.08148 0.686 -0 9 17 evt_13 1 0.3281 0.01312 0.8817 -0 9 18 evt_21 1 2.046 0.08186 0.7303 -0 9 19 evt_11 1 0.9319 0.03728 0.8259 -0 9 20 evt_9 1 1.321 0.05285 0.798 -0 9 21 evt_6 1 1.371 0.05483 0.7852 -0 9 22 evt_7 1 1.664 0.06657 0.7366 -0 9 23 evt_20 1 0.8528 0.03411 0.795 -0 10 0 evt_21 1 1.994 0.07977 0.7372 -0 10 1 evt_13 1 0.5774 0.0231 0.8617 -0 10 2 evt_1 1 0.3497 0.01399 0.9098 -0 10 3 evt_7 1 1.476 0.05905 0.7729 -0 10 4 evt_18 1 0.865 0.0346 0.8212 -0 10 5 evt_8 1 0.4244 0.01697 0.9003 -0 10 6 evt_22 1 1.432 0.05728 0.7597 -0 10 7 evt_4 1 1.369 0.05478 0.769 -0 10 8 evt_14 1 0.7858 0.03143 0.8856 -0 10 9 evt_6 1 1.211 0.04843 0.8052 -0 10 10 evt_5 1 0.2712 0.01085 0.9129 -0 10 11 evt_10 1 2.394 0.09575 0.6426 -0 10 12 evt_15 1 0.3542 0.01417 0.932 -0 10 13 evt_12 0 0 0 0.966 -0 10 14 evt_19 1 3.191 0.1276 0.4285 -0 10 15 evt_23 1 0.3276 0.01311 0.8784 -0 10 16 evt_9 1 1.257 0.05027 0.8101 -0 10 17 evt_2 1 0.3494 0.01398 0.8884 -0 10 18 evt_16 1 0.7751 0.031 0.8482 -0 10 19 evt_11 1 1.199 0.04795 0.7643 -0 10 20 evt_0 1 1.633 0.0653 0.7249 -0 10 21 evt_3 1 0.9179 0.03672 0.8212 -0 10 22 evt_17 1 0.345 0.0138 0.9257 -0 10 23 evt_20 1 1.138 0.04552 0.792 -0 11 0 evt_21 1 0.6499 0.026 
0.8969 -0 11 1 evt_11 1 0.5663 0.02265 0.8673 -0 11 2 evt_6 1 1.136 0.04545 0.8154 -0 11 3 evt_12 0 0 0 0.9834 -0 11 4 evt_5 1 0.8049 0.03219 0.8679 -0 11 5 evt_13 1 0.9677 0.03871 0.8247 -0 11 6 evt_22 1 1.751 0.07005 0.6729 -0 11 7 evt_16 1 0.3291 0.01317 0.8927 -0 11 8 evt_1 1 0.3163 0.01265 0.9239 -0 11 9 evt_7 1 0.9878 0.03951 0.8392 -0 11 10 evt_8 0 0 0 0.9565 -0 11 11 evt_3 1 0.9297 0.03719 0.8349 -0 11 12 evt_20 1 1.121 0.04485 0.7992 -0 11 13 evt_17 1 0.3246 0.01299 0.9323 -0 11 14 evt_23 1 0.5015 0.02006 0.8748 -0 11 15 evt_2 1 0.8243 0.03297 0.829 -0 11 16 evt_18 1 0.6826 0.0273 0.8669 -0 11 17 evt_19 1 1.632 0.06529 0.6853 -0 11 18 evt_15 1 0.4293 0.01717 0.9214 -0 11 19 evt_4 1 1.006 0.04023 0.834 -0 11 20 evt_14 1 0.4945 0.01978 0.9082 -0 11 21 evt_10 1 2.554 0.1022 0.6098 -0 11 22 evt_0 1 2.154 0.08616 0.6858 -0 11 23 evt_9 1 2.012 0.0805 0.701 -0 12 0 evt_11 1 1.048 0.04191 0.8098 -0 12 1 evt_21 1 0.2589 0.01035 0.9286 -0 12 2 evt_5 1 0.4861 0.01944 0.9125 -0 12 3 evt_1 1 0.2857 0.01143 0.9313 -0 12 4 evt_10 1 2.782 0.1113 0.5618 -0 12 5 evt_4 1 0.6893 0.02757 0.8611 -0 12 6 evt_12 0 0 0 0.9844 -0 12 7 evt_0 1 1.891 0.07564 0.7365 -0 12 8 evt_17 1 0.3423 0.01369 0.9405 -0 12 9 evt_8 0 0 0 0.97 -0 12 10 evt_3 1 0.5991 0.02397 0.8331 -0 12 11 evt_13 1 1.704 0.06817 0.7192 -0 12 12 evt_2 1 0.3046 0.01218 0.9352 -0 12 13 evt_19 1 1.458 0.05834 0.7236 -0 12 14 evt_23 1 1.574 0.06294 0.6927 -0 12 15 evt_16 1 0.8044 0.03218 0.855 -0 12 16 evt_7 1 0.5573 0.02229 0.872 -0 12 17 evt_20 1 0.4365 0.01746 0.8676 -0 12 18 evt_6 1 0.6344 0.02538 0.834 -0 12 19 evt_22 1 0.823 0.03292 0.8452 -0 12 20 evt_18 1 0.6411 0.02564 0.8531 -0 12 21 evt_14 1 0.4781 0.01912 0.9097 -0 12 22 evt_15 1 0.3562 0.01425 0.9432 -0 12 23 evt_9 1 1.008 0.04031 0.8463 -0 13 0 evt_0 1 1.482 0.05928 0.7589 -0 13 1 evt_12 0 0 0 0.9833 -0 13 2 evt_18 0 0 0 0.9152 -0 13 3 evt_8 0 0 0 0.9771 -0 13 4 evt_2 0 0 0 0.968 -0 13 5 evt_5 1 0.4702 0.01881 0.9234 -0 13 6 evt_15 1 0.3401 0.0136 0.93 -0 13 7 evt_21 1 0.255 0.0102 0.9396 -0 13 8 evt_23 1 0.7531 0.03012 0.8689 -0 13 9 evt_10 1 1.648 0.0659 0.716 -0 13 10 evt_4 1 0.8163 0.03265 0.8554 -0 13 11 evt_14 1 0.4347 0.01739 0.9095 -0 13 12 evt_20 1 1.632 0.06526 0.7358 -0 13 13 evt_13 1 1.489 0.05957 0.7385 -0 13 14 evt_9 1 0.9664 0.03866 0.8539 -0 13 15 evt_16 1 0.2936 0.01175 0.8794 -0 13 16 evt_6 1 0.5515 0.02206 0.8825 -0 13 17 evt_11 1 0.5198 0.02079 0.8946 -0 13 18 evt_7 1 0.2781 0.01112 0.9258 -0 13 19 evt_1 0 0 0 0.9325 -0 13 20 evt_19 1 1.037 0.04146 0.822 -0 13 21 evt_17 1 0.2804 0.01121 0.9509 -0 13 22 evt_3 1 1.55 0.062 0.7545 -0 13 23 evt_22 1 0.7392 0.02957 0.8599 -0 14 0 evt_20 1 0.346 0.01384 0.9085 -0 14 1 evt_11 1 0.4654 0.01862 0.8978 -0 14 2 evt_8 0 0 0 0.9812 -0 14 3 evt_17 1 0.2702 0.01081 0.9541 -0 14 4 evt_21 0 0 0 0.9501 -0 14 5 evt_5 1 0.4243 0.01697 0.9324 -0 14 6 evt_15 0 0 0 0.9558 -0 14 7 evt_18 0 0 0 0.9335 -0 14 8 evt_22 1 0.6544 0.02618 0.8762 -0 14 9 evt_23 1 0.716 0.02864 0.8684 -0 14 10 evt_2 0 0 0 0.9692 -0 14 11 evt_13 1 0.2725 0.0109 0.8823 -0 14 12 evt_12 0 0 0 0.9875 -0 14 13 evt_14 1 0.4223 0.01689 0.9115 -0 14 14 evt_9 1 0.5483 0.02193 0.8805 -0 14 15 evt_7 1 0.2734 0.01094 0.9244 -0 14 16 evt_19 1 0.5704 0.02281 0.8643 -0 14 17 evt_4 1 0.3261 0.01304 0.8893 -0 14 18 evt_3 1 0.7449 0.0298 0.8656 -0 14 19 evt_16 1 0.332 0.01328 0.9247 -0 14 20 evt_6 1 0.471 0.01884 0.9067 -0 14 21 evt_1 0 0 0 0.9578 -0 14 22 evt_10 1 1.688 0.06752 0.7062 -0 14 23 evt_0 0 0 0 0.9086 -0 15 0 evt_8 0 0 0 0.9812 -0 15 1 evt_4 1 0.4024 0.0161 0.8805 -0 
15 2 evt_9 1 0.4597 0.01839 0.9035 -0 15 3 evt_5 1 0.3616 0.01446 0.943 -0 15 4 evt_23 1 0.6584 0.02634 0.8774 -0 15 5 evt_16 1 0.4411 0.01764 0.9125 -0 15 6 evt_1 0 0 0 0.9414 -0 15 7 evt_21 0 0 0 0.923 -0 15 8 evt_11 1 0.4152 0.01661 0.8965 -0 15 9 evt_6 1 0.3885 0.01554 0.9255 -0 15 10 evt_19 1 0.254 0.01016 0.9021 -0 15 11 evt_17 0 0 0 0.956 -0 15 12 evt_12 0 0 0 0.991 -0 15 13 evt_3 1 0.4547 0.01819 0.8839 -0 15 14 evt_14 1 0.3686 0.01475 0.9194 -0 15 15 evt_18 0 0 0 0.9441 -0 15 16 evt_10 1 0.2861 0.01144 0.8998 -0 15 17 evt_13 1 0.294 0.01176 0.8924 -0 15 18 evt_7 0 0 0 0.9546 -0 15 19 evt_0 0 0 0 0.9451 -0 15 20 evt_22 1 0.3598 0.01439 0.9017 -0 15 21 evt_2 0 0 0 0.9717 -0 15 22 evt_15 0 0 0 0.961 -0 15 23 evt_20 0 0 0 0.9487 -0 16 0 evt_0 1 0.3564 0.01426 0.9444 -0 16 1 evt_22 1 0.3336 0.01334 0.9162 -0 16 2 evt_18 0 0 0 0.9554 -0 16 3 evt_13 1 0.2718 0.01087 0.9041 -0 16 4 evt_23 1 0.2604 0.01042 0.9139 -0 16 5 evt_15 1 0.2991 0.01196 0.9445 -0 16 6 evt_4 1 0.3609 0.01444 0.8907 -0 16 7 evt_21 0 0 0 0.9621 -0 16 8 evt_16 1 0.5723 0.02289 0.9104 -0 16 9 evt_11 1 0.4254 0.01701 0.9086 -0 16 10 evt_5 1 0.2887 0.01155 0.9538 -0 16 11 evt_17 0 0 0 0.9739 -0 16 12 evt_7 0 0 0 0.964 -0 16 13 evt_12 0 0 0 0.9934 -0 16 14 evt_8 0 0 0 0.9848 -0 16 15 evt_14 1 0.311 0.01244 0.9297 -0 16 16 evt_1 1 0.5817 0.02327 0.878 -0 16 17 evt_2 0 0 0 0.9714 -0 16 18 evt_20 0 0 0 0.9478 -0 16 19 evt_3 1 0.3913 0.01565 0.9134 -0 16 20 evt_6 1 0.299 0.01196 0.9435 -0 16 21 evt_19 0 0 0 0.9188 -0 16 22 evt_9 1 0.3699 0.0148 0.9028 -0 16 23 evt_10 1 0.2884 0.01154 0.9219 -0 17 0 evt_5 0 0 0 0.9586 -0 17 1 evt_20 0 0 0 0.9518 -0 17 2 evt_18 0 0 0 0.9698 -0 17 3 evt_13 0 0 0 0.9288 -0 17 4 evt_10 1 0.2591 0.01037 0.9319 -0 17 5 evt_4 1 0.2779 0.01111 0.9007 -0 17 6 evt_7 0 0 0 0.9544 -0 17 7 evt_12 0 0 0 0.9977 -0 17 8 evt_23 1 1.324 0.05298 0.7366 -0 17 9 evt_1 0 0 0 0.9572 -0 17 10 evt_3 1 0.3171 0.01268 0.9287 -0 17 11 evt_21 0 0 0 0.9614 -0 17 12 evt_17 0 0 0 0.9829 -0 17 13 evt_11 1 0.4422 0.01769 0.9128 -0 17 14 evt_22 0 0 0 0.9413 -0 17 15 evt_6 0 0 0 0.9608 -0 17 16 evt_9 1 0.3836 0.01534 0.9227 -0 17 17 evt_8 0 0 0 0.9846 -0 17 18 evt_2 0 0 0 0.973 -0 17 19 evt_19 0 0 0 0.9355 -0 17 20 evt_15 0 0 0 0.9653 -0 17 21 evt_16 1 0.3035 0.01214 0.9334 -0 17 22 evt_0 1 0.6912 0.02765 0.9075 -0 17 23 evt_14 0 0 0 0.9443 -0 18 0 evt_15 0 0 0 0.9679 -0 18 1 evt_2 0 0 0 0.9683 -0 18 2 evt_8 0 0 0 0.9789 -0 18 3 evt_6 1 0.2601 0.0104 0.949 -0 18 4 evt_4 1 0.3598 0.01439 0.9083 -0 18 5 evt_1 0 0 0 0.9531 -0 18 6 evt_18 0 0 0 0.9701 -0 18 7 evt_22 0 0 0 0.9663 -0 18 8 evt_0 1 0.6076 0.0243 0.9185 -0 18 9 evt_14 0 0 0 0.9551 -0 18 10 evt_5 0 0 0 0.9624 -0 18 11 evt_23 1 0.2908 0.01163 0.9152 -0 18 12 evt_17 0 0 0 0.986 -0 18 13 evt_9 1 0.4208 0.01683 0.9266 -0 18 14 evt_19 0 0 0 0.9465 -0 18 15 evt_21 0 0 0 0.9695 -0 18 16 evt_3 0 0 0 0.9441 -0 18 17 evt_16 1 0.3738 0.01495 0.9301 -0 18 18 evt_12 0 0 0 0.9983 -0 18 19 evt_13 1 0.5813 0.02325 0.8246 -0 18 20 evt_11 1 0.4019 0.01608 0.9049 -0 18 21 evt_10 0 0 0 0.9477 -0 18 22 evt_7 0 0 0 0.9787 -0 18 23 evt_20 0 0 0 0.9584 -0 19 0 evt_0 1 0.2797 0.01119 0.9598 -0 19 1 evt_10 0 0 0 0.9681 -0 19 2 evt_5 0 0 0 0.9746 -0 19 3 evt_11 1 0.3651 0.0146 0.9238 -0 19 4 evt_14 0 0 0 0.9584 -0 19 5 evt_9 1 0.3654 0.01462 0.9367 -0 19 6 evt_19 0 0 0 0.9656 -0 19 7 evt_13 0 0 0 0.9433 -0 19 8 evt_18 0 0 0 0.9768 -0 19 9 evt_12 0 0 0 0.9987 -0 19 10 evt_3 1 0.29 0.0116 0.9237 -0 19 11 evt_6 0 0 0 0.9833 -0 19 12 evt_2 0 0 0 0.9733 -0 19 13 evt_8 0 0 0 0.9816 -0 19 14 evt_23 1 
0.2805 0.01122 0.9188 -0 19 15 evt_7 0 0 0 0.9806 -0 19 16 evt_22 1 0.2791 0.01116 0.9021 -0 19 17 evt_21 0 0 0 0.9412 -0 19 18 evt_1 0 0 0 0.9744 -0 19 19 evt_16 1 0.3218 0.01287 0.936 -0 19 20 evt_4 1 0.2834 0.01134 0.9192 -0 19 21 evt_20 0 0 0 0.9752 -0 19 22 evt_15 0 0 0 0.983 -0 19 23 evt_17 0 0 0 0.9902 -0 20 0 evt_17 0 0 0 0.9912 -0 20 1 evt_1 0 0 0 0.9782 -0 20 2 evt_4 0 0 0 0.9379 -0 20 3 evt_5 0 0 0 0.9698 -0 20 4 evt_15 0 0 0 0.976 -0 20 5 evt_11 0 0 0 0.9292 -0 20 6 evt_22 0 0 0 0.9439 -0 20 7 evt_23 1 0.3545 0.01418 0.9127 -0 20 8 evt_9 1 0.2853 0.01141 0.9413 -0 20 9 evt_16 1 0.9185 0.03674 0.8456 -0 20 10 evt_14 0 0 0 0.9695 -0 20 11 evt_7 0 0 0 0.9862 -0 20 12 evt_2 0 0 0 0.9791 -0 20 13 evt_6 0 0 0 0.9849 -0 20 14 evt_10 0 0 0 0.9807 -0 20 15 evt_3 0 0 0 0.9675 -0 20 16 evt_19 0 0 0 0.9719 -0 20 17 evt_18 0 0 0 0.9772 -0 20 18 evt_12 0 0 0 0.9989 -0 20 19 evt_8 0 0 0 0.9844 -0 20 20 evt_21 0 0 0 0.9538 -0 20 21 evt_0 0 0 0 0.9867 -0 20 22 evt_13 0 0 0 0.9461 -0 20 23 evt_20 0 0 0 0.978 -0 21 0 evt_23 1 0.3362 0.01345 0.92 -0 21 1 evt_6 0 0 0 0.9736 -0 21 2 evt_1 0 0 0 0.9713 -0 21 3 evt_7 0 0 0 0.9761 -0 21 4 evt_3 1 0.4949 0.0198 0.8558 -0 21 5 evt_13 1 0.2721 0.01088 0.9338 -0 21 6 evt_9 1 0.4568 0.01827 0.9187 -0 21 7 evt_4 1 0.3556 0.01422 0.9211 -0 21 8 evt_15 0 0 0 0.9573 -0 21 9 evt_17 0 0 0 0.9875 -0 21 10 evt_20 1 0.6732 0.02693 0.9019 -0 21 11 evt_12 0 0 0 0.9684 -0 21 12 evt_0 0 0 0 0.9889 -0 21 13 evt_8 0 0 0 0.9865 -0 21 14 evt_22 0 0 0 0.9537 -0 21 15 evt_10 0 0 0 0.9852 -0 21 16 evt_19 0 0 0 0.9774 -0 21 17 evt_14 1 0.2746 0.01099 0.9448 -0 21 18 evt_18 0 0 0 0.9747 -0 21 19 evt_21 0 0 0 0.9618 -0 21 20 evt_11 0 0 0 0.9247 -0 21 21 evt_5 0 0 0 0.9752 -0 21 22 evt_16 0 0 0 0.9232 -0 21 23 evt_2 0 0 0 0.9526 -0 22 0 evt_4 1 0.305 0.0122 0.9337 -0 22 1 evt_2 1 0.5857 0.02343 0.9041 -0 22 2 evt_21 0 0 0 0.9677 -0 22 3 evt_6 0 0 0 0.9804 -0 22 4 evt_7 0 0 0 0.9805 -0 22 5 evt_16 1 0.6965 0.02786 0.8989 -0 22 6 evt_20 1 0.2777 0.01111 0.9269 -0 22 7 evt_0 0 0 0 0.9684 -0 22 8 evt_14 0 0 0 0.9686 -0 22 9 evt_12 0 0 0 0.9913 -0 22 10 evt_8 0 0 0 0.9875 -0 22 11 evt_17 0 0 0 0.9872 -0 22 12 evt_22 0 0 0 0.9152 -0 22 13 evt_1 0 0 0 0.9759 -0 22 14 evt_9 1 0.3877 0.01551 0.9339 -0 22 15 evt_19 1 0.2567 0.01027 0.8996 -0 22 16 evt_10 0 0 0 0.9701 -0 22 17 evt_11 0 0 0 0.9352 -0 22 18 evt_3 0 0 0 0.9679 -0 22 19 evt_5 0 0 0 0.968 -0 22 20 evt_23 0 0 0 0.9686 -0 22 21 evt_18 0 0 0 0.9784 -0 22 22 evt_13 0 0 0 0.9407 -0 22 23 evt_15 1 0.2754 0.01102 0.9576 -0 23 0 evt_13 0 0 0 0.9435 -0 23 1 evt_9 1 0.2912 0.01165 0.9387 -0 23 2 evt_14 0 0 0 0.9678 -0 23 3 evt_5 0 0 0 0.9792 -0 23 4 evt_3 0 0 0 0.9675 -0 23 5 evt_19 0 0 0 0.9597 -0 23 6 evt_12 0 0 0 0.9924 -0 23 7 evt_11 1 0.7625 0.0305 0.8832 -0 23 8 evt_8 0 0 0 0.9873 -0 23 9 evt_17 0 0 0 0.9872 -0 23 10 evt_18 0 0 0 0.9816 -0 23 11 evt_23 0 0 0 0.9577 -0 23 12 evt_22 0 0 0 0.9314 -0 23 13 evt_21 0 0 0 0.9818 -0 23 14 evt_10 0 0 0 0.9738 -0 23 15 evt_20 1 0.2628 0.01051 0.9316 -0 23 16 evt_2 0 0 0 0.974 -0 23 17 evt_4 1 0.5356 0.02142 0.9141 -0 23 18 evt_6 0 0 0 0.9853 -0 23 19 evt_0 0 0 0 0.9895 -0 23 20 evt_1 0 0 0 0.9485 -0 23 21 evt_7 0 0 0 0.9877 -0 23 22 evt_15 1 0.2692 0.01077 0.9596 -0 23 23 evt_16 0 0 0 0.9363 -0 24 0 evt_9 1 0.3625 0.0145 0.9398 -0 24 1 evt_11 0 0 0 0.9483 -0 24 2 evt_2 0 0 0 0.9791 -0 24 3 evt_8 0 0 0 0.9902 -0 24 4 evt_6 0 0 0 0.9873 -0 24 5 evt_19 0 0 0 0.9627 -0 24 6 evt_18 0 0 0 0.9834 -0 24 7 evt_16 0 0 0 0.9567 -0 24 8 evt_21 0 0 0 0.9694 -0 24 9 evt_10 0 0 0 0.98 -0 24 10 evt_0 0 0 0 
0.988 -0 24 11 evt_22 0 0 0 0.9556 -0 24 12 evt_20 0 0 0 0.9698 -0 24 13 evt_4 0 0 0 0.9508 -0 24 14 evt_12 0 0 0 0.9948 -0 24 15 evt_23 0 0 0 0.9742 -0 24 16 evt_14 0 0 0 0.9723 -0 24 17 evt_17 0 0 0 0.9891 -0 24 18 evt_5 0 0 0 0.9514 -0 24 19 evt_7 0 0 0 0.9878 -0 24 20 evt_1 0 0 0 0.9774 -0 24 21 evt_13 1 0.3625 0.0145 0.8919 -0 24 22 evt_15 0 0 0 0.9673 -0 24 23 evt_3 0 0 0 0.9788 -0 25 0 evt_10 0 0 0 0.98 -0 25 1 evt_18 0 0 0 0.9868 -0 25 2 evt_2 0 0 0 0.9856 -0 25 3 evt_8 0 0 0 0.9899 -0 25 4 evt_6 0 0 0 0.9778 -0 25 5 evt_12 0 0 0 0.9729 -0 25 6 evt_19 0 0 0 0.9675 -0 25 7 evt_1 0 0 0 0.9763 -0 25 8 evt_7 0 0 0 0.9859 -0 25 9 evt_17 0 0 0 0.9912 -0 25 10 evt_0 0 0 0 0.9858 -0 25 11 evt_13 0 0 0 0.9572 -0 25 12 evt_21 0 0 0 0.9733 -0 25 13 evt_4 1 0.2702 0.01081 0.9368 -0 25 14 evt_20 0 0 0 0.9773 -0 25 15 evt_9 1 0.3561 0.01424 0.941 -0 25 16 evt_5 0 0 0 0.9736 -0 25 17 evt_23 0 0 0 0.9429 -0 25 18 evt_11 0 0 0 0.9629 -0 25 19 evt_15 0 0 0 0.9776 -0 25 20 evt_16 0 0 0 0.9654 -0 25 21 evt_22 0 0 0 0.9589 -0 25 22 evt_3 0 0 0 0.9801 -0 25 23 evt_14 0 0 0 0.9743 -0 26 0 evt_12 0 0 0 0.9978 -0 26 1 evt_9 1 0.3165 0.01266 0.9482 -0 26 2 evt_14 0 0 0 0.9756 -0 26 3 evt_17 0 0 0 0.9922 -0 26 4 evt_23 0 0 0 0.9508 -0 26 5 evt_6 0 0 0 0.9858 -0 26 6 evt_21 0 0 0 0.9838 -0 26 7 evt_8 0 0 0 0.9923 -0 26 8 evt_18 0 0 0 0.9888 -0 26 9 evt_5 0 0 0 0.9767 -0 26 10 evt_4 0 0 0 0.9526 -0 26 11 evt_16 0 0 0 0.9714 -0 26 12 evt_7 0 0 0 0.9861 -0 26 13 evt_13 0 0 0 0.9584 -0 26 14 evt_11 0 0 0 0.9697 -0 26 15 evt_15 0 0 0 0.9844 -0 26 16 evt_1 0 0 0 0.9758 -0 26 17 evt_3 0 0 0 0.9806 -0 26 18 evt_10 0 0 0 0.9867 -0 26 19 evt_0 0 0 0 0.9656 -0 26 20 evt_19 0 0 0 0.9773 -0 26 21 evt_2 0 0 0 0.9912 -0 26 22 evt_22 0 0 0 0.9518 -0 26 23 evt_20 1 0.2874 0.01149 0.9501 -0 27 0 evt_4 0 0 0 0.9582 -0 27 1 evt_9 1 0.2702 0.01081 0.9563 -0 27 2 evt_3 0 0 0 0.9847 -0 27 3 evt_0 0 0 0 0.9927 -0 27 4 evt_20 0 0 0 0.9734 -0 27 5 evt_13 0 0 0 0.9667 -0 27 6 evt_18 0 0 0 0.9886 -0 27 7 evt_2 0 0 0 0.9883 -0 27 8 evt_22 0 0 0 0.9542 -0 27 9 evt_8 0 0 0 0.9923 -0 27 10 evt_11 0 0 0 0.9816 -0 27 11 evt_5 0 0 0 0.9851 -0 27 12 evt_10 0 0 0 0.9859 -0 27 13 evt_17 0 0 0 0.9927 -0 27 14 evt_14 0 0 0 0.9779 -0 27 15 evt_23 0 0 0 0.9803 -0 27 16 evt_15 0 0 0 0.9866 -0 27 17 evt_19 0 0 0 0.983 -0 27 18 evt_21 1 0.4476 0.01791 0.9385 -0 27 19 evt_12 0 0 0 0.9948 -0 27 20 evt_16 0 0 0 0.9746 -0 27 21 evt_6 0 0 0 0.987 -0 27 22 evt_1 0 0 0 0.9841 -0 27 23 evt_7 0 0 0 0.9884 -0 28 0 evt_15 0 0 0 0.9897 -0 28 1 evt_5 0 0 0 0.9857 -0 28 2 evt_8 0 0 0 0.9924 -0 28 3 evt_10 0 0 0 0.9864 -0 28 4 evt_4 0 0 0 0.9352 -0 28 5 evt_16 0 0 0 0.9661 -0 28 6 evt_0 0 0 0 0.9962 -0 28 7 evt_9 0 0 0 0.9727 -0 28 8 evt_17 0 0 0 0.9937 -0 28 9 evt_18 0 0 0 0.9916 -0 28 10 evt_7 0 0 0 0.9899 -0 28 11 evt_3 0 0 0 0.9795 -0 28 12 evt_6 1 0.2913 0.01165 0.9544 -0 28 13 evt_20 0 0 0 0.9708 -0 28 14 evt_1 1 1.2 0.04801 0.7627 -0 28 15 evt_11 0 0 0 0.9828 -0 28 16 evt_13 0 0 0 0.9699 -0 28 17 evt_2 0 0 0 0.9908 -0 28 18 evt_12 0 0 0 0.9957 -0 28 19 evt_23 0 0 0 0.9751 -0 28 20 evt_22 0 0 0 0.9625 -0 28 21 evt_14 0 0 0 0.9835 -0 28 22 evt_19 0 0 0 0.9892 -0 28 23 evt_21 1 0.4217 0.01687 0.9409 -0 29 0 evt_11 0 0 0 0.9826 -0 29 1 evt_4 0 0 0 0.9745 -0 29 2 evt_14 0 0 0 0.9849 -0 29 3 evt_21 1 0.4035 0.01614 0.9444 -0 29 4 evt_1 0 0 0 0.9564 -0 29 5 evt_22 0 0 0 0.9685 -0 29 6 evt_13 0 0 0 0.9705 -0 29 7 evt_23 0 0 0 0.9769 -0 29 8 evt_8 0 0 0 0.9914 -0 29 9 evt_12 0 0 0 0.9899 -0 29 10 evt_7 0 0 0 0.9892 -0 29 11 evt_5 0 0 0 0.9779 -0 29 12 evt_19 0 0 0 
0.9904 -0 29 13 evt_6 0 0 0 0.9866 -0 29 14 evt_18 0 0 0 0.9928 -0 29 15 evt_15 0 0 0 0.9923 -0 29 16 evt_3 0 0 0 0.9799 -0 29 17 evt_2 0 0 0 0.9928 -0 29 18 evt_17 1 0.7199 0.0288 0.8934 -0 29 19 evt_9 0 0 0 0.9781 -0 29 20 evt_10 0 0 0 0.9864 -0 29 21 evt_20 0 0 0 0.9718 -0 29 22 evt_0 0 0 0 0.9976 -0 29 23 evt_16 0 0 0 0.971 -0 30 0 evt_22 0 0 0 0.9756 -0 30 1 evt_1 0 0 0 0.9642 -0 30 2 evt_4 0 0 0 0.9754 -0 30 3 evt_8 0 0 0 0.993 -0 30 4 evt_16 0 0 0 0.9736 -0 30 5 evt_18 0 0 0 0.9909 -0 30 6 evt_11 0 0 0 0.9888 -0 30 7 evt_20 0 0 0 0.9548 -0 30 8 evt_19 0 0 0 0.992 -0 30 9 evt_10 0 0 0 0.96 -0 30 10 evt_5 0 0 0 0.9769 -0 30 11 evt_23 0 0 0 0.984 -0 30 12 evt_15 0 0 0 0.9944 -0 30 13 evt_2 0 0 0 0.995 -0 30 14 evt_17 0 0 0 0.9893 -0 30 15 evt_9 0 0 0 0.9791 -0 30 16 evt_6 0 0 0 0.9925 -0 30 17 evt_12 0 0 0 0.9975 -0 30 18 evt_21 1 0.3265 0.01306 0.9513 -0 30 19 evt_7 0 0 0 0.9922 -0 30 20 evt_3 0 0 0 0.993 -0 30 21 evt_13 0 0 0 0.9732 -0 30 22 evt_14 0 0 0 0.9859 -0 30 23 evt_0 0 0 0 0.9979 -0 31 0 evt_6 1 0.2584 0.01034 0.9603 -0 31 1 evt_17 0 0 0 0.9887 -0 31 2 evt_9 0 0 0 0.9735 -0 31 3 evt_15 0 0 0 0.9905 -0 31 4 evt_13 0 0 0 0.977 -0 31 5 evt_23 0 0 0 0.9775 -0 31 6 evt_8 0 0 0 0.9945 -0 31 7 evt_4 0 0 0 0.9796 -0 31 8 evt_7 0 0 0 0.9962 -0 31 9 evt_10 0 0 0 0.9892 -0 31 10 evt_2 0 0 0 0.9969 -0 31 11 evt_16 1 0.2756 0.01102 0.9537 -0 31 12 evt_1 0 0 0 0.9786 -0 31 13 evt_18 0 0 0 0.9913 -0 31 14 evt_3 0 0 0 0.9915 -0 31 15 evt_22 0 0 0 0.9514 -0 31 16 evt_5 0 0 0 0.9827 -0 31 17 evt_20 0 0 0 0.9589 -0 31 18 evt_21 0 0 0 0.9694 -0 31 19 evt_19 0 0 0 0.9921 -0 31 20 evt_11 0 0 0 0.9921 -0 31 21 evt_14 0 0 0 0.984 -0 31 22 evt_0 0 0 0 0.9978 -0 31 23 evt_12 0 0 0 0.9961 -0 32 0 evt_19 0 0 0 0.9823 -0 32 1 evt_11 0 0 0 0.9932 -0 32 2 evt_17 0 0 0 0.9889 -0 32 3 evt_13 0 0 0 0.9744 -0 32 4 evt_5 0 0 0 0.988 -0 32 5 evt_0 0 0 0 0.9979 -0 32 6 evt_7 0 0 0 0.9971 -0 32 7 evt_10 0 0 0 0.9895 -0 32 8 evt_16 0 0 0 0.9819 -0 32 9 evt_4 0 0 0 0.9804 -0 32 10 evt_20 0 0 0 0.9785 -0 32 11 evt_6 0 0 0 0.9922 -0 32 12 evt_9 0 0 0 0.9806 -0 32 13 evt_15 0 0 0 0.988 -0 32 14 evt_8 0 0 0 0.9949 -0 32 15 evt_1 0 0 0 0.9855 -0 32 16 evt_12 0 0 0 0.9934 -0 32 17 evt_14 0 0 0 0.9862 -0 32 18 evt_18 0 0 0 0.9917 -0 32 19 evt_2 0 0 0 0.9918 -0 32 20 evt_21 0 0 0 0.9844 -0 32 21 evt_23 0 0 0 0.9809 -0 32 22 evt_3 0 0 0 0.9851 -0 32 23 evt_22 0 0 0 0.9807 -0 33 0 evt_8 0 0 0 0.9961 -0 33 1 evt_4 0 0 0 0.984 -0 33 2 evt_0 1 0.5611 0.02244 0.9279 -0 33 3 evt_1 0 0 0 0.9893 -0 33 4 evt_22 0 0 0 0.9817 -0 33 5 evt_13 0 0 0 0.9767 -0 33 6 evt_16 0 0 0 0.9751 -0 33 7 evt_21 0 0 0 0.9913 -0 33 8 evt_6 0 0 0 0.9883 -0 33 9 evt_20 0 0 0 0.9795 -0 33 10 evt_9 0 0 0 0.9912 -0 33 11 evt_17 0 0 0 0.9724 -0 33 12 evt_12 0 0 0 0.9939 -0 33 13 evt_15 0 0 0 0.9897 -0 33 14 evt_11 0 0 0 0.9949 -0 33 15 evt_23 0 0 0 0.9867 -0 33 16 evt_10 0 0 0 0.9904 -0 33 17 evt_2 0 0 0 0.9931 -0 33 18 evt_18 0 0 0 0.9891 -0 33 19 evt_14 0 0 0 0.9894 -0 33 20 evt_3 0 0 0 0.9875 -0 33 21 evt_5 0 0 0 0.9716 -0 33 22 evt_7 0 0 0 0.9965 -0 33 23 evt_19 0 0 0 0.9954 -0 34 0 evt_16 0 0 0 0.9791 -0 34 1 evt_7 0 0 0 0.9967 -0 34 2 evt_6 0 0 0 0.9782 -0 34 3 evt_4 0 0 0 0.9804 -0 34 4 evt_1 0 0 0 0.9898 -0 34 5 evt_17 0 0 0 0.9931 -0 34 6 evt_20 0 0 0 0.984 -0 34 7 evt_14 0 0 0 0.9887 -0 34 8 evt_13 0 0 0 0.9797 -0 34 9 evt_18 0 0 0 0.9937 -0 34 10 evt_15 0 0 0 0.995 -0 34 11 evt_9 0 0 0 0.9784 -0 34 12 evt_2 0 0 0 0.9938 -0 34 13 evt_21 0 0 0 0.9926 -0 34 14 evt_3 0 0 0 0.9902 -0 34 15 evt_11 0 0 0 0.9959 -0 34 16 evt_19 0 0 0 0.9957 -0 34 17 
evt_22 0 0 0 0.9839 -0 34 18 evt_8 0 0 0 0.9968 -0 34 19 evt_23 0 0 0 0.9853 -0 34 20 evt_10 0 0 0 0.9916 -0 34 21 evt_5 0 0 0 0.992 -0 34 22 evt_12 0 0 0 0.9823 -0 34 23 evt_0 0 0 0 0.9819 -0 35 0 evt_9 0 0 0 0.9805 -0 35 1 evt_10 0 0 0 0.9893 -0 35 2 evt_21 0 0 0 0.9941 -0 35 3 evt_7 0 0 0 0.998 -0 35 4 evt_22 0 0 0 0.9866 -0 35 5 evt_14 0 0 0 0.9893 -0 35 6 evt_12 0 0 0 0.996 -0 35 7 evt_15 0 0 0 0.9904 -0 35 8 evt_4 0 0 0 0.9852 -0 35 9 evt_19 0 0 0 0.9943 -0 35 10 evt_3 0 0 0 0.9902 -0 35 11 evt_6 0 0 0 0.9965 -0 35 12 evt_11 0 0 0 0.9942 -0 35 13 evt_17 0 0 0 0.9942 -0 35 14 evt_8 0 0 0 0.9974 -0 35 15 evt_5 0 0 0 0.9919 -0 35 16 evt_0 0 0 0 0.9907 -0 35 17 evt_2 0 0 0 0.9919 -0 35 18 evt_13 0 0 0 0.9839 -0 35 19 evt_1 0 0 0 0.9931 -0 35 20 evt_20 0 0 0 0.9784 -0 35 21 evt_23 0 0 0 0.9899 -0 35 22 evt_16 0 0 0 0.982 -0 35 23 evt_18 0 0 0 0.9941 -0 36 0 evt_9 0 0 0 0.9898 -0 36 1 evt_3 0 0 0 0.9905 -0 36 2 evt_13 0 0 0 0.9862 -0 36 3 evt_17 0 0 0 0.9953 -0 36 4 evt_0 0 0 0 0.9919 -0 36 5 evt_4 0 0 0 0.9882 -0 36 6 evt_20 0 0 0 0.984 -0 36 7 evt_5 0 0 0 0.9788 -0 36 8 evt_10 0 0 0 0.9946 -0 36 9 evt_7 0 0 0 0.9982 -0 36 10 evt_1 0 0 0 0.9919 -0 36 11 evt_21 0 0 0 0.9941 -0 36 12 evt_23 0 0 0 0.9859 -0 36 13 evt_2 0 0 0 0.997 -0 36 14 evt_18 0 0 0 0.9944 -0 36 15 evt_19 0 0 0 0.9948 -0 36 16 evt_12 0 0 0 0.9962 -0 36 17 evt_8 0 0 0 0.9975 -0 36 18 evt_15 0 0 0 0.9903 -0 36 19 evt_22 0 0 0 0.988 -0 36 20 evt_6 0 0 0 0.9973 -0 36 21 evt_14 0 0 0 0.9917 -0 36 22 evt_11 0 0 0 0.9972 -0 36 23 evt_16 0 0 0 0.9906 -0 37 0 evt_16 0 0 0 0.9883 -0 37 1 evt_0 0 0 0 0.9927 -0 37 2 evt_13 0 0 0 0.9881 -0 37 3 evt_1 0 0 0 0.9923 -0 37 4 evt_4 0 0 0 0.9902 -0 37 5 evt_19 0 0 0 0.9955 -0 37 6 evt_18 0 0 0 0.9857 -0 37 7 evt_14 0 0 0 0.9921 -0 37 8 evt_9 0 0 0 0.9913 -0 37 9 evt_3 0 0 0 0.9967 -0 37 10 evt_2 0 0 0 0.9969 -0 37 11 evt_15 0 0 0 0.9915 -0 37 12 evt_8 0 0 0 0.9975 -0 37 13 evt_6 0 0 0 0.9777 -0 37 14 evt_21 0 0 0 0.9946 -0 37 15 evt_22 0 0 0 0.9869 -0 37 16 evt_23 0 0 0 0.989 -0 37 17 evt_7 0 0 0 0.9933 -0 37 18 evt_11 0 0 0 0.9963 -0 37 19 evt_17 0 0 0 0.9959 -0 37 20 evt_5 0 0 0 0.9882 -0 37 21 evt_20 0 0 0 0.9889 -0 37 22 evt_10 0 0 0 0.9947 -0 37 23 evt_12 0 0 0 0.9973 -0 38 0 evt_1 0 0 0 0.9927 -0 38 1 evt_19 0 0 0 0.997 -0 38 2 evt_23 0 0 0 0.9936 -0 38 3 evt_12 0 0 0 0.9983 -0 38 4 evt_10 0 0 0 0.9951 -0 38 5 evt_15 0 0 0 0.992 -0 38 6 evt_20 0 0 0 0.9889 -0 38 7 evt_7 0 0 0 0.9985 -0 38 8 evt_22 0 0 0 0.989 -0 38 9 evt_18 0 0 0 0.9946 -0 38 10 evt_0 0 0 0 0.9944 -0 38 11 evt_8 0 0 0 0.9971 -0 38 12 evt_6 0 0 0 0.9976 -0 38 13 evt_5 0 0 0 0.9899 -0 38 14 evt_3 0 0 0 0.9961 -0 38 15 evt_9 0 0 0 0.9949 -0 38 16 evt_11 0 0 0 0.9971 -0 38 17 evt_13 0 0 0 0.989 -0 38 18 evt_2 0 0 0 0.9968 -0 38 19 evt_4 0 0 0 0.9903 -0 38 20 evt_17 0 0 0 0.9963 -0 38 21 evt_14 0 0 0 0.9929 -0 38 22 evt_16 0 0 0 0.9926 -0 38 23 evt_21 0 0 0 0.9957 -1 0 0 evt_16 1 6.848 0.2739 0.1435 -1 0 1 evt_12 1 5.058 0.2023 0.3853 -1 0 2 evt_8 1 9.914 0.3965 -0.2417 -1 0 3 evt_3 1 6.471 0.2589 0.1899 -1 0 4 evt_4 1 5.78 0.2312 0.2326 -1 0 5 evt_5 1 6.731 0.2692 0.1182 -1 0 6 evt_19 1 8.117 0.3247 -0.03995 -1 0 7 evt_2 1 4.497 0.1799 0.4042 -1 0 8 evt_17 1 9.773 0.3909 -0.2311 -1 0 9 evt_7 1 9.081 0.3632 -0.2398 -1 0 10 evt_21 1 7.176 0.2871 0.04076 -1 0 11 evt_6 1 11.38 0.4551 -0.428 -1 0 12 evt_9 1 8.124 0.325 -0.05187 -1 0 13 evt_1 1 9.513 0.3805 -0.1896 -1 0 14 evt_10 1 7.463 0.2985 0.03613 -1 0 15 evt_11 1 5.677 0.2271 0.2078 -1 0 16 evt_14 1 9.147 0.3659 -0.1891 -1 0 17 evt_22 1 7.44 0.2976 -0.055 -1 0 18 evt_20 1 
10.52 0.421 -0.444 -1 0 19 evt_15 1 4.777 0.1911 0.3836 -1 0 20 evt_0 1 7.971 0.3188 -0.03048 -1 0 21 evt_13 1 5.25 0.21 0.2806 -1 0 22 evt_18 1 9.338 0.3735 -0.2245 -1 0 23 evt_23 1 8.147 0.3259 -0.07679 -1 1 0 evt_23 1 8.117 0.3247 -0.08137 -1 1 1 evt_4 1 4.035 0.1614 0.4581 -1 1 2 evt_16 1 4.628 0.1851 0.3449 -1 1 3 evt_0 1 6.361 0.2544 0.1515 -1 1 4 evt_15 1 3.375 0.135 0.5198 -1 1 5 evt_20 1 8.08 0.3232 -0.1185 -1 1 6 evt_17 1 7.56 0.3024 -0.04589 -1 1 7 evt_19 1 5.507 0.2203 0.2394 -1 1 8 evt_12 1 5.125 0.205 0.3076 -1 1 9 evt_6 1 10.25 0.4101 -0.4569 -1 1 10 evt_21 1 6.243 0.2497 0.1676 -1 1 11 evt_5 1 7.586 0.3034 0.002136 -1 1 12 evt_7 1 8.919 0.3568 -0.2381 -1 1 13 evt_9 1 8.723 0.3489 -0.185 -1 1 14 evt_18 1 8.647 0.3459 -0.1875 -1 1 15 evt_22 1 8.48 0.3392 -0.2061 -1 1 16 evt_13 1 3.849 0.154 0.3785 -1 1 17 evt_14 1 7.118 0.2847 -0.0219 -1 1 18 evt_3 1 7.236 0.2894 0.04345 -1 1 19 evt_10 1 5.873 0.2349 0.1202 -1 1 20 evt_1 1 7.789 0.3116 -0.005122 -1 1 21 evt_11 1 3.668 0.1467 0.4559 -1 1 22 evt_2 1 4.965 0.1986 0.3701 -1 1 23 evt_8 1 7.389 0.2956 0.01169 -1 2 0 evt_9 1 7.295 0.2918 -0.05919 -1 2 1 evt_10 1 4.998 0.1999 0.2679 -1 2 2 evt_8 1 6.003 0.2401 0.1832 -1 2 3 evt_21 1 7.224 0.2889 0.05959 -1 2 4 evt_18 1 4.954 0.1982 0.3305 -1 2 5 evt_2 1 5.072 0.2029 0.3193 -1 2 6 evt_17 1 6.539 0.2615 0.09371 -1 2 7 evt_22 1 8.972 0.3589 -0.1842 -1 2 8 evt_5 1 5.592 0.2237 0.197 -1 2 9 evt_7 1 3.788 0.1515 0.4613 -1 2 10 evt_20 1 7.671 0.3068 -0.06872 -1 2 11 evt_4 1 5.906 0.2362 0.2237 -1 2 12 evt_15 1 2.921 0.1168 0.537 -1 2 13 evt_3 1 4.934 0.1973 0.2431 -1 2 14 evt_11 1 3.936 0.1574 0.4296 -1 2 15 evt_14 1 4.223 0.1689 0.3874 -1 2 16 evt_16 1 3.934 0.1573 0.3664 -1 2 17 evt_6 1 5.593 0.2237 0.1586 -1 2 18 evt_12 1 4.086 0.1634 0.409 -1 2 19 evt_23 1 5.114 0.2046 0.2212 -1 2 20 evt_19 1 6.312 0.2525 0.08792 -1 2 21 evt_0 1 3.517 0.1407 0.4671 -1 2 22 evt_1 1 7.559 0.3024 -0.0348 -1 2 23 evt_13 1 4.72 0.1888 0.2797 -1 3 0 evt_7 1 4.034 0.1614 0.437 -1 3 1 evt_16 1 2.661 0.1064 0.5996 -1 3 2 evt_17 1 4.66 0.1864 0.3221 -1 3 3 evt_3 1 4.856 0.1942 0.282 -1 3 4 evt_2 1 5.403 0.2161 0.2431 -1 3 5 evt_10 1 2.844 0.1138 0.5099 -1 3 6 evt_11 1 2.924 0.117 0.5553 -1 3 7 evt_19 1 6.015 0.2406 0.1806 -1 3 8 evt_14 1 4.401 0.1761 0.3848 -1 3 9 evt_13 1 3.805 0.1522 0.3931 -1 3 10 evt_20 1 5.841 0.2337 0.1459 -1 3 11 evt_18 1 2.692 0.1077 0.5933 -1 3 12 evt_22 1 7.649 0.306 -0.05623 -1 3 13 evt_6 1 3.01 0.1204 0.4373 -1 3 14 evt_9 1 3.321 0.1328 0.5037 -1 3 15 evt_4 1 3.712 0.1485 0.4416 -1 3 16 evt_8 1 7.156 0.2863 0.05712 -1 3 17 evt_5 1 4.201 0.168 0.3675 -1 3 18 evt_23 1 5.394 0.2158 0.1944 -1 3 19 evt_15 1 3.753 0.1501 0.4392 -1 3 20 evt_0 1 2.295 0.09182 0.6474 -1 3 21 evt_12 1 2.626 0.105 0.5643 -1 3 22 evt_1 1 7.559 0.3024 -0.005584 -1 3 23 evt_21 1 4.989 0.1996 0.2655 -1 4 0 evt_16 1 2.176 0.08702 0.6571 -1 4 1 evt_13 1 2.421 0.09683 0.6247 -1 4 2 evt_20 1 4.163 0.1665 0.4244 -1 4 3 evt_11 1 2.346 0.09385 0.6752 -1 4 4 evt_15 1 4.256 0.1702 0.4065 -1 4 5 evt_19 1 5.054 0.2021 0.2624 -1 4 6 evt_17 1 2.504 0.1002 0.559 -1 4 7 evt_6 1 2.992 0.1197 0.4966 -1 4 8 evt_7 1 1.724 0.06897 0.7265 -1 4 9 evt_10 1 1.789 0.07155 0.6929 -1 4 10 evt_1 1 4.972 0.1989 0.2639 -1 4 11 evt_18 1 2.536 0.1014 0.6192 -1 4 12 evt_2 1 6.11 0.2444 0.171 -1 4 13 evt_5 1 3.644 0.1458 0.4613 -1 4 14 evt_4 1 6.462 0.2585 0.01108 -1 4 15 evt_3 1 4.952 0.1981 0.2638 -1 4 16 evt_22 1 5.662 0.2265 0.1048 -1 4 17 evt_12 1 1.427 0.05707 0.7586 -1 4 18 evt_9 1 6.496 0.2598 0.05309 -1 4 19 evt_8 1 5.755 0.2302 0.2175 -1 4 
20 evt_14 1 5.9 0.236 0.09689 -1 4 21 evt_21 1 5.515 0.2206 0.1807 -1 4 22 evt_0 1 2.289 0.09156 0.6681 -1 4 23 evt_23 1 3.718 0.1487 0.4609 -1 5 0 evt_1 1 5.442 0.2177 0.1945 -1 5 1 evt_8 1 3.198 0.1279 0.5422 -1 5 2 evt_18 1 2.491 0.09963 0.6508 -1 5 3 evt_7 1 3.306 0.1323 0.4899 -1 5 4 evt_12 1 1.889 0.07555 0.7345 -1 5 5 evt_0 1 1.912 0.07647 0.6981 -1 5 6 evt_4 1 3.494 0.1397 0.4892 -1 5 7 evt_21 1 3.938 0.1575 0.3604 -1 5 8 evt_23 1 2.627 0.1051 0.575 -1 5 9 evt_5 1 1.126 0.04504 0.8367 -1 5 10 evt_10 1 1.757 0.07029 0.6979 -1 5 11 evt_20 1 4.298 0.1719 0.3607 -1 5 12 evt_2 1 3.385 0.1354 0.4867 -1 5 13 evt_11 1 1.404 0.05616 0.7635 -1 5 14 evt_14 1 3.596 0.1438 0.4903 -1 5 15 evt_22 1 4.484 0.1793 0.3524 -1 5 16 evt_3 1 3.235 0.1294 0.4922 -1 5 17 evt_15 1 2.13 0.0852 0.6215 -1 5 18 evt_6 1 2.916 0.1166 0.481 -1 5 19 evt_19 1 2.772 0.1109 0.613 -1 5 20 evt_17 1 3.973 0.1589 0.4467 -1 5 21 evt_13 1 1.47 0.05878 0.7549 -1 5 22 evt_16 1 2.629 0.1052 0.6417 -1 5 23 evt_9 1 3.392 0.1357 0.5325 -1 6 0 evt_0 1 0.9831 0.03932 0.8072 -1 6 1 evt_19 1 2.769 0.1108 0.62 -1 6 2 evt_6 1 2.943 0.1177 0.507 -1 6 3 evt_14 1 3.142 0.1257 0.5532 -1 6 4 evt_2 1 2.83 0.1132 0.5878 -1 6 5 evt_16 1 0.5427 0.02171 0.9094 -1 6 6 evt_20 1 4.849 0.1939 0.2985 -1 6 7 evt_4 1 4.169 0.1668 0.4368 -1 6 8 evt_18 1 2.249 0.08996 0.6727 -1 6 9 evt_11 1 0.6405 0.02562 0.8533 -1 6 10 evt_23 1 2.542 0.1017 0.6472 -1 6 11 evt_3 1 2.253 0.09012 0.608 -1 6 12 evt_21 1 3.42 0.1368 0.4431 -1 6 13 evt_17 1 3.916 0.1566 0.4535 -1 6 14 evt_12 1 1.499 0.05995 0.7713 -1 6 15 evt_10 1 2.614 0.1046 0.5696 -1 6 16 evt_1 1 4.203 0.1681 0.3897 -1 6 17 evt_9 1 0.9931 0.03973 0.8071 -1 6 18 evt_5 1 0.3908 0.01563 0.9128 -1 6 19 evt_15 1 1.925 0.07701 0.6467 -1 6 20 evt_22 1 4.26 0.1704 0.4063 -1 6 21 evt_13 1 0.9698 0.03879 0.8376 -1 6 22 evt_7 1 0.3145 0.01258 0.8811 -1 6 23 evt_8 1 1.995 0.07979 0.7055 -1 7 0 evt_1 1 3.971 0.1589 0.4255 -1 7 1 evt_8 1 1.569 0.06278 0.74 -1 7 2 evt_19 1 2.667 0.1067 0.6302 -1 7 3 evt_10 1 3.221 0.1288 0.4708 -1 7 4 evt_2 1 1.832 0.07326 0.7008 -1 7 5 evt_16 1 0.4981 0.01992 0.8999 -1 7 6 evt_14 1 3.447 0.1379 0.4523 -1 7 7 evt_4 1 2.519 0.1008 0.6501 -1 7 8 evt_23 1 2.199 0.08797 0.6705 -1 7 9 evt_21 1 1.271 0.05083 0.7861 -1 7 10 evt_7 1 0.3971 0.01588 0.931 -1 7 11 evt_17 1 2.702 0.1081 0.5616 -1 7 12 evt_5 1 0.2667 0.01067 0.9252 -1 7 13 evt_0 1 0.8065 0.03226 0.8561 -1 7 14 evt_15 1 4.884 0.1954 0.2791 -1 7 15 evt_6 1 1.622 0.06489 0.7066 -1 7 16 evt_22 1 3.781 0.1513 0.4247 -1 7 17 evt_9 1 2.556 0.1022 0.5561 -1 7 18 evt_3 1 1.99 0.0796 0.6399 -1 7 19 evt_18 1 1.97 0.07879 0.7121 -1 7 20 evt_12 1 1.153 0.04612 0.8077 -1 7 21 evt_20 1 4.045 0.1618 0.4247 -1 7 22 evt_11 1 1.027 0.04108 0.8287 -1 7 23 evt_13 1 0.5471 0.02188 0.88 -1 8 0 evt_4 1 1.739 0.06956 0.7067 -1 8 1 evt_15 1 1.084 0.04336 0.7607 -1 8 2 evt_9 1 1.039 0.04155 0.833 -1 8 3 evt_2 1 0.522 0.02088 0.87 -1 8 4 evt_12 1 1.313 0.05251 0.7949 -1 8 5 evt_0 1 0.6455 0.02582 0.883 -1 8 6 evt_3 1 1.688 0.06753 0.7269 -1 8 7 evt_18 1 1.634 0.06537 0.759 -1 8 8 evt_8 1 1.139 0.04554 0.8169 -1 8 9 evt_23 1 1.514 0.06058 0.7847 -1 8 10 evt_13 1 0.737 0.02948 0.8943 -1 8 11 evt_16 1 1.748 0.06992 0.7406 -1 8 12 evt_17 1 2.657 0.1063 0.5697 -1 8 13 evt_14 1 4.482 0.1793 0.2713 -1 8 14 evt_7 1 0.7474 0.0299 0.8725 -1 8 15 evt_11 1 0.9379 0.03751 0.8413 -1 8 16 evt_21 1 0.7327 0.02931 0.8503 -1 8 17 evt_5 1 0.2551 0.01021 0.9289 -1 8 18 evt_20 1 3.436 0.1374 0.4951 -1 8 19 evt_1 1 1.899 0.07595 0.6482 -1 8 20 evt_6 1 1.434 0.05737 0.7688 -1 8 21 
evt_10 1 1.129 0.04516 0.8164 -1 8 22 evt_22 1 3.104 0.1241 0.4732 -1 8 23 evt_19 1 1.295 0.05179 0.7425 -1 9 0 evt_19 1 1.28 0.05122 0.7504 -1 9 1 evt_4 1 0.6744 0.02698 0.8904 -1 9 2 evt_10 1 1.276 0.05103 0.7865 -1 9 3 evt_9 1 1.016 0.04062 0.8389 -1 9 4 evt_12 1 0.9274 0.0371 0.8329 -1 9 5 evt_5 0 0 0 0.933 -1 9 6 evt_16 0 0 0 0.9412 -1 9 7 evt_23 1 1.14 0.04561 0.8267 -1 9 8 evt_8 1 1.032 0.04127 0.8407 -1 9 9 evt_15 1 1.271 0.05085 0.7887 -1 9 10 evt_1 1 0.9292 0.03717 0.7716 -1 9 11 evt_0 1 0.6014 0.02405 0.8905 -1 9 12 evt_14 1 3.579 0.1432 0.4212 -1 9 13 evt_17 1 1.515 0.06061 0.7169 -1 9 14 evt_2 1 0.4629 0.01852 0.907 -1 9 15 evt_18 1 1.402 0.0561 0.7711 -1 9 16 evt_20 1 3.967 0.1587 0.4394 -1 9 17 evt_21 1 0.3818 0.01527 0.91 -1 9 18 evt_13 1 0.6333 0.02533 0.9096 -1 9 19 evt_6 1 4.551 0.1821 0.3313 -1 9 20 evt_7 1 0.4901 0.0196 0.9285 -1 9 21 evt_11 1 0.3698 0.01479 0.8939 -1 9 22 evt_22 1 1.909 0.07635 0.6385 -1 9 23 evt_3 1 2.249 0.08997 0.673 -1 10 0 evt_6 1 1.165 0.04658 0.7731 -1 10 1 evt_18 1 0.8197 0.03279 0.8264 -1 10 2 evt_21 1 0.3124 0.01249 0.9129 -1 10 3 evt_23 1 1.065 0.04261 0.8435 -1 10 4 evt_2 1 0.3529 0.01412 0.9305 -1 10 5 evt_22 1 1.216 0.04862 0.7512 -1 10 6 evt_14 1 3.275 0.131 0.5045 -1 10 7 evt_8 1 0.5562 0.02225 0.878 -1 10 8 evt_12 1 2.286 0.09145 0.6669 -1 10 9 evt_19 1 1.348 0.05394 0.717 -1 10 10 evt_10 1 1.901 0.07602 0.6625 -1 10 11 evt_4 1 0.3324 0.0133 0.9281 -1 10 12 evt_20 1 1.319 0.05277 0.7871 -1 10 13 evt_7 1 0.4398 0.01759 0.9307 -1 10 14 evt_13 0 0 0 0.9715 -1 10 15 evt_17 1 1.485 0.0594 0.7366 -1 10 16 evt_3 1 0.7406 0.02962 0.8247 -1 10 17 evt_0 1 0.5338 0.02135 0.9003 -1 10 18 evt_15 1 1.13 0.0452 0.8146 -1 10 19 evt_11 1 0.3439 0.01375 0.8932 -1 10 20 evt_1 1 0.5635 0.02254 0.8221 -1 10 21 evt_9 1 0.6434 0.02573 0.8385 -1 10 22 evt_5 1 0.5195 0.02078 0.9037 -1 10 23 evt_16 0 0 0 0.9706 -1 11 0 evt_8 1 1.624 0.06496 0.7559 -1 11 1 evt_17 1 1.461 0.05842 0.7469 -1 11 2 evt_5 1 0.459 0.01836 0.9102 -1 11 3 evt_23 1 0.8704 0.03482 0.8719 -1 11 4 evt_12 1 0.4534 0.01813 0.8907 -1 11 5 evt_6 1 1.103 0.04411 0.7728 -1 11 6 evt_4 1 2.629 0.1052 0.5894 -1 11 7 evt_13 0 0 0 0.9666 -1 11 8 evt_20 1 0.4471 0.01788 0.8867 -1 11 9 evt_7 1 0.258 0.01032 0.9595 -1 11 10 evt_1 1 0.531 0.02124 0.8845 -1 11 11 evt_15 1 0.6039 0.02416 0.8619 -1 11 12 evt_10 1 0.2636 0.01054 0.9265 -1 11 13 evt_22 1 0.6649 0.0266 0.8449 -1 11 14 evt_18 1 0.4053 0.01621 0.8981 -1 11 15 evt_11 0 0 0 0.9186 -1 11 16 evt_0 1 0.2961 0.01184 0.9224 -1 11 17 evt_21 1 0.2927 0.01171 0.9267 -1 11 18 evt_2 0 0 0 0.9541 -1 11 19 evt_14 1 2.412 0.0965 0.6151 -1 11 20 evt_9 1 1.635 0.06542 0.6837 -1 11 21 evt_3 1 1.775 0.07101 0.7072 -1 11 22 evt_16 1 0.3662 0.01465 0.9441 -1 11 23 evt_19 1 2.895 0.1158 0.5301 -1 12 0 evt_19 1 1.105 0.0442 0.8048 -1 12 1 evt_23 1 0.536 0.02144 0.892 -1 12 2 evt_20 1 0.4258 0.01703 0.894 -1 12 3 evt_6 1 1.093 0.04372 0.7775 -1 12 4 evt_17 1 1.43 0.0572 0.7635 -1 12 5 evt_0 0 0 0 0.9398 -1 12 6 evt_5 0 0 0 0.9558 -1 12 7 evt_21 1 0.3485 0.01394 0.9414 -1 12 8 evt_10 1 0.3122 0.01249 0.9367 -1 12 9 evt_14 1 1.321 0.05283 0.7316 -1 12 10 evt_2 1 0.28 0.0112 0.9327 -1 12 11 evt_4 1 0.2838 0.01135 0.9406 -1 12 12 evt_15 1 0.2877 0.01151 0.904 -1 12 13 evt_8 1 0.4239 0.01696 0.9233 -1 12 14 evt_11 0 0 0 0.944 -1 12 15 evt_12 0 0 0 0.9306 -1 12 16 evt_16 1 0.2979 0.01192 0.951 -1 12 17 evt_22 1 6.182 0.2473 0.1288 -1 12 18 evt_13 0 0 0 0.9653 -1 12 19 evt_3 0 0 0 0.8624 -1 12 20 evt_1 1 2.705 0.1082 0.5725 -1 12 21 evt_9 1 0.8958 0.03583 0.8477 -1 12 22 evt_7 
0 0 0 0.9837 -1 12 23 evt_18 1 0.285 0.0114 0.9325 -1 13 0 evt_11 0 0 0 0.9328 -1 13 1 evt_23 1 0.4629 0.01852 0.9085 -1 13 2 evt_7 0 0 0 0.9769 -1 13 3 evt_1 1 0.4824 0.0193 0.8852 -1 13 4 evt_21 1 0.2866 0.01146 0.9511 -1 13 5 evt_8 1 0.3481 0.01392 0.936 -1 13 6 evt_6 1 1.374 0.05494 0.7563 -1 13 7 evt_20 1 0.3578 0.01431 0.9054 -1 13 8 evt_3 1 0.2604 0.01042 0.8867 -1 13 9 evt_15 1 0.2814 0.01126 0.9576 -1 13 10 evt_22 1 0.3553 0.01421 0.8497 -1 13 11 evt_18 0 0 0 0.9638 -1 13 12 evt_17 1 1.322 0.05289 0.7643 -1 13 13 evt_13 0 0 0 0.9653 -1 13 14 evt_9 1 0.8785 0.03514 0.8521 -1 13 15 evt_12 0 0 0 0.9394 -1 13 16 evt_4 1 0.3004 0.01202 0.9452 -1 13 17 evt_0 0 0 0 0.9509 -1 13 18 evt_2 0 0 0 0.9726 -1 13 19 evt_19 1 0.6872 0.02749 0.8585 -1 13 20 evt_5 1 0.2832 0.01133 0.9587 -1 13 21 evt_10 0 0 0 0.9444 -1 13 22 evt_16 1 0.3132 0.01253 0.9551 -1 13 23 evt_14 1 0.7663 0.03065 0.8108 -1 14 0 evt_16 1 0.3117 0.01247 0.9551 -1 14 1 evt_18 0 0 0 0.969 -1 14 2 evt_2 0 0 0 0.974 -1 14 3 evt_6 1 0.7448 0.02979 0.7896 -1 14 4 evt_14 1 0.6928 0.02771 0.8353 -1 14 5 evt_9 1 0.5839 0.02336 0.8653 -1 14 6 evt_20 1 0.313 0.01252 0.9045 -1 14 7 evt_21 0 0 0 0.9651 -1 14 8 evt_1 1 0.4215 0.01686 0.9017 -1 14 9 evt_8 0 0 0 0.955 -1 14 10 evt_11 1 0.252 0.01008 0.931 -1 14 11 evt_19 1 0.6282 0.02513 0.8766 -1 14 12 evt_22 1 0.3463 0.01385 0.8537 -1 14 13 evt_17 1 1.255 0.05018 0.7772 -1 14 14 evt_3 1 1.707 0.06827 0.712 -1 14 15 evt_12 0 0 0 0.9503 -1 14 16 evt_5 1 0.2669 0.01068 0.9601 -1 14 17 evt_0 0 0 0 0.9199 -1 14 18 evt_4 1 0.3881 0.01552 0.9403 -1 14 19 evt_7 0 0 0 0.97 -1 14 20 evt_10 0 0 0 0.9516 -1 14 21 evt_23 1 0.3468 0.01387 0.9167 -1 14 22 evt_13 0 0 0 0.9727 -1 14 23 evt_15 0 0 0 0.9667 -1 15 0 evt_5 0 0 0 0.9636 -1 15 1 evt_10 0 0 0 0.9563 -1 15 2 evt_4 1 0.3569 0.01428 0.9455 -1 15 3 evt_16 0 0 0 0.9656 -1 15 4 evt_22 1 0.2741 0.01096 0.8873 -1 15 5 evt_23 1 0.287 0.01148 0.9233 -1 15 6 evt_20 1 0.2774 0.0111 0.9101 -1 15 7 evt_17 1 3.542 0.1417 0.465 -1 15 8 evt_19 1 0.6002 0.02401 0.8836 -1 15 9 evt_6 1 0.9922 0.03969 0.7832 -1 15 10 evt_12 0 0 0 0.9591 -1 15 11 evt_21 0 0 0 0.9636 -1 15 12 evt_7 1 0.6569 0.02628 0.8619 -1 15 13 evt_9 1 0.5719 0.02288 0.8763 -1 15 14 evt_3 1 0.3033 0.01213 0.8954 -1 15 15 evt_14 1 0.5675 0.0227 0.869 -1 15 16 evt_2 0 0 0 0.9807 -1 15 17 evt_18 0 0 0 0.9757 -1 15 18 evt_13 0 0 0 0.9779 -1 15 19 evt_15 1 0.3975 0.0159 0.9415 -1 15 20 evt_11 0 0 0 0.9518 -1 15 21 evt_8 0 0 0 0.9813 -1 15 22 evt_1 1 0.3837 0.01535 0.9148 -1 15 23 evt_0 0 0 0 0.9637 -1 16 0 evt_8 0 0 0 0.9789 -1 16 1 evt_22 0 0 0 0.9043 -1 16 2 evt_12 0 0 0 0.9619 -1 16 3 evt_17 1 1.127 0.04508 0.8062 -1 16 4 evt_6 1 0.9804 0.03921 0.7997 -1 16 5 evt_16 0 0 0 0.9719 -1 16 6 evt_10 0 0 0 0.9566 -1 16 7 evt_4 1 0.2909 0.01163 0.9567 -1 16 8 evt_13 0 0 0 0.9825 -1 16 9 evt_3 1 0.3156 0.01262 0.9166 -1 16 10 evt_19 1 0.5679 0.02271 0.8942 -1 16 11 evt_15 0 0 0 0.9562 -1 16 12 evt_0 0 0 0 0.9679 -1 16 13 evt_1 1 0.3706 0.01482 0.9188 -1 16 14 evt_21 0 0 0 0.9766 -1 16 15 evt_7 0 0 0 0.9807 -1 16 16 evt_5 0 0 0 0.9856 -1 16 17 evt_11 0 0 0 0.9671 -1 16 18 evt_18 0 0 0 0.9765 -1 16 19 evt_9 1 0.5439 0.02175 0.8939 -1 16 20 evt_2 0 0 0 0.9894 -1 16 21 evt_20 1 0.3055 0.01222 0.9141 -1 16 22 evt_23 0 0 0 0.9374 -1 16 23 evt_14 1 0.5174 0.0207 0.8833 -1 17 0 evt_5 0 0 0 0.9853 -1 17 1 evt_15 0 0 0 0.9597 -1 17 2 evt_21 0 0 0 0.9744 -1 17 3 evt_18 0 0 0 0.9769 -1 17 4 evt_12 0 0 0 0.964 -1 17 5 evt_2 0 0 0 0.993 -1 17 6 evt_20 1 0.3066 0.01226 0.9163 -1 17 7 evt_17 1 1.046 0.04183 0.8236 -1 17 8 
evt_3 1 0.3027 0.01211 0.9236 -1 17 9 evt_22 1 0.7086 0.02835 0.8867 -1 17 10 evt_11 0 0 0 0.9686 -1 17 11 evt_4 0 0 0 0.9824 -1 17 12 evt_1 1 0.2951 0.0118 0.9383 -1 17 13 evt_10 0 0 0 0.9751 -1 17 14 evt_9 1 0.5061 0.02024 0.9013 -1 17 15 evt_6 1 0.7086 0.02834 0.8227 -1 17 16 evt_19 1 0.5821 0.02328 0.894 -1 17 17 evt_13 0 0 0 0.986 -1 17 18 evt_7 0 0 0 0.9765 -1 17 19 evt_23 0 0 0 0.9376 -1 17 20 evt_14 1 0.2516 0.01006 0.9059 -1 17 21 evt_16 0 0 0 0.9792 -1 17 22 evt_0 0 0 0 0.9686 -1 17 23 evt_8 0 0 0 0.9501 -1 18 0 evt_3 0 0 0 0.9447 -1 18 1 evt_18 0 0 0 0.9807 -1 18 2 evt_15 0 0 0 0.9807 -1 18 3 evt_9 1 0.4177 0.01671 0.9104 -1 18 4 evt_7 0 0 0 0.9741 -1 18 5 evt_4 0 0 0 0.9848 -1 18 6 evt_19 1 0.3376 0.0135 0.9173 -1 18 7 evt_20 1 0.3035 0.01214 0.9178 -1 18 8 evt_13 0 0 0 0.9749 -1 18 9 evt_6 1 0.7076 0.02831 0.8323 -1 18 10 evt_11 0 0 0 0.965 -1 18 11 evt_12 0 0 0 0.9513 -1 18 12 evt_5 0 0 0 0.9758 -1 18 13 evt_14 0 0 0 0.9161 -1 18 14 evt_17 1 0.9858 0.03943 0.8353 -1 18 15 evt_1 1 0.2737 0.01095 0.9434 -1 18 16 evt_23 0 0 0 0.9294 -1 18 17 evt_22 1 0.6672 0.02669 0.8918 -1 18 18 evt_8 0 0 0 0.9465 -1 18 19 evt_10 0 0 0 0.9697 -1 18 20 evt_16 0 0 0 0.9684 -1 18 21 evt_2 0 0 0 0.966 -1 18 22 evt_0 1 0.2618 0.01047 0.9618 -1 18 23 evt_21 1 0.2634 0.01053 0.9437 -1 19 0 evt_6 1 0.6987 0.02795 0.841 -1 19 1 evt_9 1 0.4255 0.01702 0.9097 -1 19 2 evt_23 0 0 0 0.9368 -1 19 3 evt_10 0 0 0 0.9754 -1 19 4 evt_14 1 0.3024 0.0121 0.9021 -1 19 5 evt_21 1 0.3464 0.01386 0.927 -1 19 6 evt_8 0 0 0 0.971 -1 19 7 evt_12 0 0 0 0.9529 -1 19 8 evt_11 0 0 0 0.9682 -1 19 9 evt_19 1 0.4943 0.01977 0.894 -1 19 10 evt_0 0 0 0 0.9573 -1 19 11 evt_18 0 0 0 0.9603 -1 19 12 evt_17 1 0.9695 0.03878 0.8406 -1 19 13 evt_7 1 0.4339 0.01736 0.9288 -1 19 14 evt_22 0 0 0 0.9222 -1 19 15 evt_4 0 0 0 0.9807 -1 19 16 evt_3 1 1.945 0.07781 0.7231 -1 19 17 evt_16 0 0 0 0.9744 -1 19 18 evt_20 1 0.2666 0.01066 0.9301 -1 19 19 evt_2 0 0 0 0.9747 -1 19 20 evt_5 1 0.3454 0.01382 0.9493 -1 19 21 evt_1 0 0 0 0.9566 -1 19 22 evt_15 1 0.4031 0.01613 0.9419 -1 19 23 evt_13 0 0 0 0.9753 -1 20 0 evt_3 0 0 0 0.9611 -1 20 1 evt_6 1 0.6762 0.02705 0.862 -1 20 2 evt_14 1 1.03 0.0412 0.7333 -1 20 3 evt_19 1 0.4509 0.01804 0.9315 -1 20 4 evt_17 1 0.8442 0.03377 0.86 -1 20 5 evt_11 0 0 0 0.9722 -1 20 6 evt_0 0 0 0 0.9607 -1 20 7 evt_12 0 0 0 0.9579 -1 20 8 evt_2 0 0 0 0.9745 -1 20 9 evt_15 0 0 0 0.972 -1 20 10 evt_18 0 0 0 0.9645 -1 20 11 evt_1 0 0 0 0.9656 -1 20 12 evt_21 1 0.3128 0.01251 0.9315 -1 20 13 evt_10 0 0 0 0.9818 -1 20 14 evt_5 1 0.3364 0.01345 0.9515 -1 20 15 evt_13 0 0 0 0.9762 -1 20 16 evt_9 1 0.3258 0.01303 0.9235 -1 20 17 evt_7 0 0 0 0.9715 -1 20 18 evt_8 0 0 0 0.9837 -1 20 19 evt_16 1 1.359 0.05437 0.7885 -1 20 20 evt_22 1 0.7098 0.02839 0.8707 -1 20 21 evt_4 0 0 0 0.9791 -1 20 22 evt_20 0 0 0 0.9382 -1 20 23 evt_23 0 0 0 0.948 -1 21 0 evt_6 1 0.6586 0.02635 0.8699 -1 21 1 evt_3 0 0 0 0.9576 -1 21 2 evt_19 1 0.4072 0.01629 0.9386 -1 21 3 evt_23 0 0 0 0.9498 -1 21 4 evt_17 1 0.7512 0.03005 0.8761 -1 21 5 evt_16 0 0 0 0.9811 -1 21 6 evt_22 0 0 0 0.9591 -1 21 7 evt_4 0 0 0 0.9715 -1 21 8 evt_10 0 0 0 0.9833 -1 21 9 evt_8 0 0 0 0.9799 -1 21 10 evt_1 0 0 0 0.9684 -1 21 11 evt_12 0 0 0 0.9652 -1 21 12 evt_9 1 0.3566 0.01426 0.905 -1 21 13 evt_15 0 0 0 0.9745 -1 21 14 evt_14 1 0.3147 0.01259 0.9038 -1 21 15 evt_0 0 0 0 0.9735 -1 21 16 evt_18 0 0 0 0.9717 -1 21 17 evt_11 0 0 0 0.9766 -1 21 18 evt_7 0 0 0 0.9863 -1 21 19 evt_21 0 0 0 0.9626 -1 21 20 evt_13 0 0 0 0.9833 -1 21 21 evt_5 0 0 0 0.967 -1 21 22 evt_20 0 0 0 0.9478 -1 
21 23 evt_2 0 0 0 0.9899 -1 22 0 evt_7 0 0 0 0.9863 -1 22 1 evt_21 0 0 0 0.9763 -1 22 2 evt_15 0 0 0 0.9783 -1 22 3 evt_1 0 0 0 0.9718 -1 22 4 evt_20 0 0 0 0.9508 -1 22 5 evt_2 0 0 0 0.9945 -1 22 6 evt_11 0 0 0 0.9799 -1 22 7 evt_0 0 0 0 0.9884 -1 22 8 evt_3 0 0 0 0.9685 -1 22 9 evt_8 0 0 0 0.9782 -1 22 10 evt_5 1 0.3298 0.01319 0.956 -1 22 11 evt_14 1 0.4011 0.01604 0.8923 -1 22 12 evt_17 1 0.6591 0.02637 0.8915 -1 22 13 evt_19 1 0.3406 0.01362 0.9471 -1 22 14 evt_13 0 0 0 0.9872 -1 22 15 evt_4 0 0 0 0.9738 -1 22 16 evt_6 1 0.6492 0.02597 0.8652 -1 22 17 evt_9 0 0 0 0.9574 -1 22 18 evt_12 0 0 0 0.9662 -1 22 19 evt_18 0 0 0 0.9816 -1 22 20 evt_16 0 0 0 0.9843 -1 22 21 evt_10 0 0 0 0.973 -1 22 22 evt_22 0 0 0 0.9464 -1 22 23 evt_23 0 0 0 0.956 -1 23 0 evt_21 0 0 0 0.9783 -1 23 1 evt_3 0 0 0 0.9701 -1 23 2 evt_10 0 0 0 0.9766 -1 23 3 evt_11 0 0 0 0.981 -1 23 4 evt_20 0 0 0 0.9598 -1 23 5 evt_1 0 0 0 0.9753 -1 23 6 evt_13 0 0 0 0.9918 -1 23 7 evt_18 0 0 0 0.9876 -1 23 8 evt_19 1 0.2572 0.01029 0.9557 -1 23 9 evt_0 1 0.2535 0.01014 0.9314 -1 23 10 evt_2 0 0 0 0.9935 -1 23 11 evt_4 0 0 0 0.989 -1 23 12 evt_14 1 0.3947 0.01579 0.8939 -1 23 13 evt_9 0 0 0 0.9632 -1 23 14 evt_16 0 0 0 0.9864 -1 23 15 evt_5 1 0.3215 0.01286 0.9574 -1 23 16 evt_17 1 0.252 0.01008 0.9 -1 23 17 evt_12 0 0 0 0.9669 -1 23 18 evt_15 0 0 0 0.9834 -1 23 19 evt_22 0 0 0 0.9467 -1 23 20 evt_8 0 0 0 0.9806 -1 23 21 evt_6 1 0.6469 0.02587 0.859 -1 23 22 evt_7 0 0 0 0.9907 -1 23 23 evt_23 0 0 0 0.9626 -1 24 0 evt_6 1 0.6377 0.02551 0.8627 -1 24 1 evt_2 0 0 0 0.9941 -1 24 2 evt_10 0 0 0 0.9862 -1 24 3 evt_18 0 0 0 0.9911 -1 24 4 evt_21 0 0 0 0.9825 -1 24 5 evt_11 0 0 0 0.9838 -1 24 6 evt_16 0 0 0 0.9817 -1 24 7 evt_14 1 0.3079 0.01232 0.9166 -1 24 8 evt_3 0 0 0 0.9755 -1 24 9 evt_0 0 0 0 0.9623 -1 24 10 evt_1 0 0 0 0.9754 -1 24 11 evt_19 0 0 0 0.961 -1 24 12 evt_7 0 0 0 0.992 -1 24 13 evt_22 0 0 0 0.952 -1 24 14 evt_13 0 0 0 0.9935 -1 24 15 evt_8 0 0 0 0.9842 -1 24 16 evt_23 0 0 0 0.9653 -1 24 17 evt_12 0 0 0 0.9704 -1 24 18 evt_17 1 0.4647 0.01859 0.8952 -1 24 19 evt_4 0 0 0 0.989 -1 24 20 evt_20 0 0 0 0.9703 -1 24 21 evt_5 0 0 0 0.9689 -1 24 22 evt_9 0 0 0 0.9662 -1 24 23 evt_15 0 0 0 0.9865 -1 25 0 evt_6 1 0.5792 0.02317 0.8674 -1 25 1 evt_17 1 0.2604 0.01042 0.9411 -1 25 2 evt_9 0 0 0 0.9679 -1 25 3 evt_8 0 0 0 0.9893 -1 25 4 evt_13 0 0 0 0.9945 -1 25 5 evt_0 0 0 0 0.9619 -1 25 6 evt_22 0 0 0 0.9633 -1 25 7 evt_23 0 0 0 0.9703 -1 25 8 evt_7 0 0 0 0.9927 -1 25 9 evt_5 0 0 0 0.981 -1 25 10 evt_19 0 0 0 0.9642 -1 25 11 evt_4 0 0 0 0.9904 -1 25 12 evt_12 0 0 0 0.9755 -1 25 13 evt_1 0 0 0 0.9765 -1 25 14 evt_11 0 0 0 0.9845 -1 25 15 evt_21 0 0 0 0.9785 -1 25 16 evt_10 0 0 0 0.9832 -1 25 17 evt_16 0 0 0 0.9938 -1 25 18 evt_15 0 0 0 0.9895 -1 25 19 evt_2 0 0 0 0.9937 -1 25 20 evt_14 1 0.5555 0.02222 0.8766 -1 25 21 evt_3 0 0 0 0.9814 -1 25 22 evt_20 0 0 0 0.9772 -1 25 23 evt_18 0 0 0 0.9954 -1 26 0 evt_23 0 0 0 0.9719 -1 26 1 evt_3 0 0 0 0.9831 -1 26 2 evt_15 0 0 0 0.9929 -1 26 3 evt_12 0 0 0 0.9809 -1 26 4 evt_13 0 0 0 0.9954 -1 26 5 evt_21 0 0 0 0.984 -1 26 6 evt_10 0 0 0 0.9842 -1 26 7 evt_11 0 0 0 0.9852 -1 26 8 evt_6 1 2.551 0.1021 0.5686 -1 26 9 evt_18 0 0 0 0.996 -1 26 10 evt_20 0 0 0 0.9824 -1 26 11 evt_16 0 0 0 0.9811 -1 26 12 evt_0 0 0 0 0.9674 -1 26 13 evt_14 0 0 0 0.9338 -1 26 14 evt_17 1 0.3827 0.01531 0.9274 -1 26 15 evt_2 0 0 0 0.9964 -1 26 16 evt_7 0 0 0 0.9933 -1 26 17 evt_1 0 0 0 0.9787 -1 26 18 evt_5 0 0 0 0.9849 -1 26 19 evt_19 0 0 0 0.968 -1 26 20 evt_8 0 0 0 0.9878 -1 26 21 evt_22 0 0 0 0.9717 -1 26 22 evt_4 0 
0 0 0.9919 -1 26 23 evt_9 0 0 0 0.974 -1 27 0 evt_23 0 0 0 0.9742 -1 27 1 evt_0 0 0 0 0.9772 -1 27 2 evt_12 0 0 0 0.9828 -1 27 3 evt_6 1 0.4015 0.01606 0.8924 -1 27 4 evt_10 0 0 0 0.9847 -1 27 5 evt_9 0 0 0 0.9732 -1 27 6 evt_13 0 0 0 0.9962 -1 27 7 evt_11 0 0 0 0.9858 -1 27 8 evt_17 1 0.299 0.01196 0.9413 -1 27 9 evt_15 0 0 0 0.9955 -1 27 10 evt_2 0 0 0 0.9965 -1 27 11 evt_8 0 0 0 0.9905 -1 27 12 evt_4 0 0 0 0.9919 -1 27 13 evt_16 0 0 0 0.9944 -1 27 14 evt_21 0 0 0 0.9904 -1 27 15 evt_1 0 0 0 0.98 -1 27 16 evt_19 0 0 0 0.9708 -1 27 17 evt_7 0 0 0 0.9944 -1 27 18 evt_20 0 0 0 0.9841 -1 27 19 evt_14 0 0 0 0.9241 -1 27 20 evt_5 0 0 0 0.9853 -1 27 21 evt_18 0 0 0 0.9962 -1 27 22 evt_3 0 0 0 0.9879 -1 27 23 evt_22 0 0 0 0.9726 -1 28 0 evt_1 0 0 0 0.9823 -1 28 1 evt_22 0 0 0 0.9776 -1 28 2 evt_0 0 0 0 0.9857 -1 28 3 evt_14 0 0 0 0.9384 -1 28 4 evt_2 0 0 0 0.9968 -1 28 5 evt_10 0 0 0 0.9791 -1 28 6 evt_11 0 0 0 0.9853 -1 28 7 evt_17 0 0 0 0.9506 -1 28 8 evt_12 0 0 0 0.9852 -1 28 9 evt_6 1 0.2999 0.01199 0.8946 -1 28 10 evt_19 0 0 0 0.9734 -1 28 11 evt_20 0 0 0 0.9863 -1 28 12 evt_13 0 0 0 0.9971 -1 28 13 evt_21 0 0 0 0.9897 -1 28 14 evt_4 0 0 0 0.993 -1 28 15 evt_7 0 0 0 0.9944 -1 28 16 evt_9 0 0 0 0.9736 -1 28 17 evt_5 0 0 0 0.9863 -1 28 18 evt_15 0 0 0 0.9964 -1 28 19 evt_23 0 0 0 0.9808 -1 28 20 evt_8 0 0 0 0.9908 -1 28 21 evt_3 0 0 0 0.9892 -1 28 22 evt_16 0 0 0 0.9856 -1 28 23 evt_18 0 0 0 0.9965 -1 29 0 evt_12 0 0 0 0.9865 -1 29 1 evt_15 0 0 0 0.9969 -1 29 2 evt_6 1 0.2931 0.01173 0.9019 -1 29 3 evt_4 0 0 0 0.994 -1 29 4 evt_2 0 0 0 0.9982 -1 29 5 evt_5 0 0 0 0.988 -1 29 6 evt_21 0 0 0 0.9895 -1 29 7 evt_1 0 0 0 0.9804 -1 29 8 evt_16 0 0 0 0.9801 -1 29 9 evt_20 0 0 0 0.9848 -1 29 10 evt_22 0 0 0 0.9742 -1 29 11 evt_8 0 0 0 0.9939 -1 29 12 evt_0 0 0 0 0.9901 -1 29 13 evt_14 0 0 0 0.9568 -1 29 14 evt_3 0 0 0 0.9895 -1 29 15 evt_23 0 0 0 0.9829 -1 29 16 evt_11 0 0 0 0.9852 -1 29 17 evt_7 0 0 0 0.9943 -1 29 18 evt_10 0 0 0 0.9845 -1 29 19 evt_19 0 0 0 0.974 -1 29 20 evt_13 0 0 0 0.9967 -1 29 21 evt_17 0 0 0 0.9623 -1 29 22 evt_18 0 0 0 0.9959 -1 29 23 evt_9 0 0 0 0.978 -1 30 0 evt_6 1 0.2556 0.01022 0.9203 -1 30 1 evt_11 0 0 0 0.9858 -1 30 2 evt_17 0 0 0 0.9665 -1 30 3 evt_23 0 0 0 0.9824 -1 30 4 evt_14 0 0 0 0.9607 -1 30 5 evt_18 0 0 0 0.9959 -1 30 6 evt_13 0 0 0 0.9963 -1 30 7 evt_16 0 0 0 0.9836 -1 30 8 evt_20 0 0 0 0.9859 -1 30 9 evt_15 0 0 0 0.9935 -1 30 10 evt_0 0 0 0 0.9933 -1 30 11 evt_2 0 0 0 0.9985 -1 30 12 evt_12 0 0 0 0.9888 -1 30 13 evt_19 0 0 0 0.9771 -1 30 14 evt_7 0 0 0 0.9927 -1 30 15 evt_3 0 0 0 0.9881 -1 30 16 evt_21 0 0 0 0.9926 -1 30 17 evt_5 0 0 0 0.9899 -1 30 18 evt_9 0 0 0 0.9814 -1 30 19 evt_22 0 0 0 0.9763 -1 30 20 evt_10 0 0 0 0.9879 -1 30 21 evt_8 0 0 0 0.9928 -1 30 22 evt_4 0 0 0 0.9949 -1 30 23 evt_1 0 0 0 0.9849 -1 31 0 evt_17 0 0 0 0.9756 -1 31 1 evt_16 0 0 0 0.9785 -1 31 2 evt_4 0 0 0 0.9957 -1 31 3 evt_3 0 0 0 0.9906 -1 31 4 evt_19 0 0 0 0.9819 -1 31 5 evt_11 0 0 0 0.9874 -1 31 6 evt_1 0 0 0 0.9811 -1 31 7 evt_15 0 0 0 0.9949 -1 31 8 evt_6 1 0.2503 0.01001 0.9265 -1 31 9 evt_14 0 0 0 0.967 -1 31 10 evt_13 0 0 0 0.9966 -1 31 11 evt_20 0 0 0 0.9863 -1 31 12 evt_21 0 0 0 0.994 -1 31 13 evt_8 0 0 0 0.9945 -1 31 14 evt_5 0 0 0 0.9919 -1 31 15 evt_10 0 0 0 0.9879 -1 31 16 evt_23 0 0 0 0.9855 -1 31 17 evt_9 0 0 0 0.9874 -1 31 18 evt_2 0 0 0 0.9986 -1 31 19 evt_7 0 0 0 0.9907 -1 31 20 evt_12 0 0 0 0.99 -1 31 21 evt_22 0 0 0 0.9771 -1 31 22 evt_0 0 0 0 0.9944 -1 31 23 evt_18 0 0 0 0.9973 -1 32 0 evt_0 0 0 0 0.9938 -1 32 1 evt_13 0 0 0 0.9971 -1 32 2 evt_19 0 0 0 0.9843 
-1 32 3 evt_2 0 0 0 0.9986 -1 32 4 evt_4 0 0 0 0.9966 -1 32 5 evt_6 0 0 0 0.9428 -1 32 6 evt_10 0 0 0 0.9909 -1 32 7 evt_11 0 0 0 0.9877 -1 32 8 evt_21 0 0 0 0.9935 -1 32 9 evt_1 0 0 0 0.9823 -1 32 10 evt_9 0 0 0 0.9901 -1 32 11 evt_23 0 0 0 0.9898 -1 32 12 evt_5 0 0 0 0.9925 -1 32 13 evt_17 0 0 0 0.9843 -1 32 14 evt_16 0 0 0 0.9862 -1 32 15 evt_14 0 0 0 0.9761 -1 32 16 evt_8 0 0 0 0.9955 -1 32 17 evt_15 0 0 0 0.9972 -1 32 18 evt_3 0 0 0 0.9899 -1 32 19 evt_22 0 0 0 0.9798 -1 32 20 evt_18 0 0 0 0.9976 -1 32 21 evt_7 0 0 0 0.9899 -1 32 22 evt_20 0 0 0 0.9863 -1 32 23 evt_12 0 0 0 0.9909 -1 33 0 evt_12 0 0 0 0.9912 -1 33 1 evt_18 0 0 0 0.9976 -1 33 2 evt_10 0 0 0 0.9908 -1 33 3 evt_5 0 0 0 0.9931 -1 33 4 evt_7 0 0 0 0.9898 -1 33 5 evt_20 0 0 0 0.9759 -1 33 6 evt_17 0 0 0 0.9885 -1 33 7 evt_11 0 0 0 0.9884 -1 33 8 evt_16 0 0 0 0.991 -1 33 9 evt_23 0 0 0 0.9908 -1 33 10 evt_6 0 0 0 0.9503 -1 33 11 evt_2 0 0 0 0.9974 -1 33 12 evt_4 0 0 0 0.9969 -1 33 13 evt_19 0 0 0 0.9855 -1 33 14 evt_14 0 0 0 0.9769 -1 33 15 evt_9 0 0 0 0.9903 -1 33 16 evt_1 0 0 0 0.9871 -1 33 17 evt_8 0 0 0 0.9964 -1 33 18 evt_21 0 0 0 0.9892 -1 33 19 evt_22 0 0 0 0.9823 -1 33 20 evt_3 0 0 0 0.9905 -1 33 21 evt_0 0 0 0 0.9943 -1 33 22 evt_13 0 0 0 0.9978 -1 33 23 evt_15 0 0 0 0.9973 -1 34 0 evt_2 0 0 0 0.9976 -1 34 1 evt_0 0 0 0 0.9946 -1 34 2 evt_22 0 0 0 0.9846 -1 34 3 evt_14 0 0 0 0.9823 -1 34 4 evt_23 0 0 0 0.986 -1 34 5 evt_9 0 0 0 0.9905 -1 34 6 evt_13 0 0 0 0.9981 -1 34 7 evt_3 0 0 0 0.9923 -1 34 8 evt_11 0 0 0 0.9893 -1 34 9 evt_21 0 0 0 0.9898 -1 34 10 evt_15 0 0 0 0.9971 -1 34 11 evt_18 0 0 0 0.9977 -1 34 12 evt_8 0 0 0 0.9963 -1 34 13 evt_6 0 0 0 0.9559 -1 34 14 evt_7 0 0 0 0.9914 -1 34 15 evt_5 0 0 0 0.9931 -1 34 16 evt_19 0 0 0 0.987 -1 34 17 evt_12 0 0 0 0.9926 -1 34 18 evt_10 0 0 0 0.9842 -1 34 19 evt_17 0 0 0 0.9884 -1 34 20 evt_16 0 0 0 0.9959 -1 34 21 evt_20 0 0 0 0.9788 -1 34 22 evt_1 0 0 0 0.9814 -1 34 23 evt_4 0 0 0 0.9979 -1 35 0 evt_16 0 0 0 0.9942 -1 35 1 evt_20 0 0 0 0.9815 -1 35 2 evt_7 0 0 0 0.9954 -1 35 3 evt_1 0 0 0 0.9822 -1 35 4 evt_12 0 0 0 0.9933 -1 35 5 evt_17 0 0 0 0.9889 -1 35 6 evt_15 0 0 0 0.9975 -1 35 7 evt_22 0 0 0 0.9873 -1 35 8 evt_11 0 0 0 0.991 -1 35 9 evt_0 0 0 0 0.9948 -1 35 10 evt_4 0 0 0 0.998 -1 35 11 evt_23 0 0 0 0.9849 -1 35 12 evt_6 0 0 0 0.9342 -1 35 13 evt_10 0 0 0 0.9908 -1 35 14 evt_3 0 0 0 0.9931 -1 35 15 evt_18 0 0 0 0.9977 -1 35 16 evt_21 0 0 0 0.9937 -1 35 17 evt_8 0 0 0 0.9972 -1 35 18 evt_9 0 0 0 0.9908 -1 35 19 evt_2 0 0 0 0.9968 -1 35 20 evt_14 0 0 0 0.975 -1 35 21 evt_13 0 0 0 0.9979 -1 35 22 evt_5 0 0 0 0.9935 -1 35 23 evt_19 0 0 0 0.9871 -1 36 0 evt_7 0 0 0 0.9967 -1 36 1 evt_2 0 0 0 0.9984 -1 36 2 evt_22 0 0 0 0.9893 -1 36 3 evt_21 0 0 0 0.9941 -1 36 4 evt_19 0 0 0 0.9884 -1 36 5 evt_8 0 0 0 0.9975 -1 36 6 evt_18 0 0 0 0.9978 -1 36 7 evt_11 0 0 0 0.9922 -1 36 8 evt_12 0 0 0 0.9945 -1 36 9 evt_5 0 0 0 0.9939 -1 36 10 evt_10 0 0 0 0.9937 -1 36 11 evt_16 0 0 0 0.9935 -1 36 12 evt_4 0 0 0 0.9983 -1 36 13 evt_17 0 0 0 0.99 -1 36 14 evt_3 0 0 0 0.991 -1 36 15 evt_23 0 0 0 0.9869 -1 36 16 evt_20 0 0 0 0.9888 -1 36 17 evt_0 0 0 0 0.9962 -1 36 18 evt_14 0 0 0 0.9789 -1 36 19 evt_15 0 0 0 0.9968 -1 36 20 evt_9 0 0 0 0.9931 -1 36 21 evt_13 0 0 0 0.9931 -1 36 22 evt_1 0 0 0 0.9836 -1 36 23 evt_6 1 0.3285 0.01314 0.9396 -1 37 0 evt_15 0 0 0 0.9971 -1 37 1 evt_6 1 0.2702 0.01081 0.9481 -1 37 2 evt_23 0 0 0 0.992 -1 37 3 evt_19 0 0 0 0.9884 -1 37 4 evt_11 0 0 0 0.9934 -1 37 5 evt_12 0 0 0 0.9946 -1 37 6 evt_17 0 0 0 0.9913 -1 37 7 evt_0 0 0 0 0.9964 -1 37 8 evt_21 0 0 0 
0.9949 -1 37 9 evt_10 0 0 0 0.9937 -1 37 10 evt_16 0 0 0 0.9957 -1 37 11 evt_18 0 0 0 0.998 -1 37 12 evt_1 0 0 0 0.9843 -1 37 13 evt_20 0 0 0 0.9855 -1 37 14 evt_7 0 0 0 0.9972 -1 37 15 evt_14 0 0 0 0.9838 -1 37 16 evt_3 0 0 0 0.9906 -1 37 17 evt_8 0 0 0 0.9977 -1 37 18 evt_4 0 0 0 0.9985 -1 37 19 evt_22 0 0 0 0.9835 -1 37 20 evt_9 0 0 0 0.9869 -1 37 21 evt_13 0 0 0 0.994 -1 37 22 evt_5 0 0 0 0.9944 -1 37 23 evt_2 0 0 0 0.9985 -1 38 0 evt_4 0 0 0 0.9986 -1 38 1 evt_12 0 0 0 0.9952 -1 38 2 evt_20 0 0 0 0.9887 -1 38 3 evt_22 0 0 0 0.9933 -1 38 4 evt_11 0 0 0 0.9943 -1 38 5 evt_0 0 0 0 0.9967 -1 38 6 evt_10 0 0 0 0.9945 -1 38 7 evt_19 0 0 0 0.989 -1 38 8 evt_3 0 0 0 0.9891 -1 38 9 evt_15 0 0 0 0.9971 -1 38 10 evt_21 0 0 0 0.9957 -1 38 11 evt_2 0 0 0 0.9985 -1 38 12 evt_1 0 0 0 0.9908 -1 38 13 evt_5 0 0 0 0.9945 -1 38 14 evt_8 0 0 0 0.9977 -1 38 15 evt_18 0 0 0 0.998 -1 38 16 evt_6 0 0 0 0.9441 -1 38 17 evt_7 0 0 0 0.9955 -1 38 18 evt_14 0 0 0 0.9833 -1 38 19 evt_16 0 0 0 0.994 -1 38 20 evt_9 0 0 0 0.9882 -1 38 21 evt_23 0 0 0 0.9927 -1 38 22 evt_17 0 0 0 0.992 -1 38 23 evt_13 0 0 0 0.996 -1 39 0 evt_9 0 0 0 0.9915 -1 39 1 evt_11 0 0 0 0.9946 -1 39 2 evt_16 0 0 0 0.9975 -1 39 3 evt_3 0 0 0 0.9903 -1 39 4 evt_5 0 0 0 0.9956 -1 39 5 evt_0 0 0 0 0.9973 -1 39 6 evt_15 0 0 0 0.9968 -1 39 7 evt_6 0 0 0 0.9538 -1 39 8 evt_23 0 0 0 0.9889 -1 39 9 evt_22 0 0 0 0.9919 -1 39 10 evt_1 0 0 0 0.9848 -1 39 11 evt_7 0 0 0 0.9953 -1 39 12 evt_10 0 0 0 0.9858 -1 39 13 evt_2 0 0 0 0.9985 -1 39 14 evt_17 0 0 0 0.9924 -1 39 15 evt_13 0 0 0 0.9966 -1 39 16 evt_12 0 0 0 0.9958 -1 39 17 evt_21 0 0 0 0.9953 -1 39 18 evt_19 0 0 0 0.9895 -1 39 19 evt_8 0 0 0 0.9977 -1 39 20 evt_20 0 0 0 0.9907 -1 39 21 evt_18 0 0 0 0.997 -1 39 22 evt_14 0 0 0 0.9868 -1 39 23 evt_4 0 0 0 0.9987 -1 40 0 evt_9 0 0 0 0.9933 -1 40 1 evt_4 0 0 0 0.9987 -1 40 2 evt_15 0 0 0 0.9976 -1 40 3 evt_7 0 0 0 0.9956 -1 40 4 evt_14 0 0 0 0.9814 -1 40 5 evt_21 0 0 0 0.9948 -1 40 6 evt_12 0 0 0 0.996 -1 40 7 evt_1 0 0 0 0.9847 -1 40 8 evt_3 0 0 0 0.9902 -1 40 9 evt_18 0 0 0 0.9968 -1 40 10 evt_17 0 0 0 0.9941 -1 40 11 evt_23 0 0 0 0.9901 -1 40 12 evt_19 0 0 0 0.9899 -1 40 13 evt_11 0 0 0 0.9954 -1 40 14 evt_5 0 0 0 0.9962 -1 40 15 evt_6 0 0 0 0.9621 -1 40 16 evt_2 0 0 0 0.9983 -1 40 17 evt_20 0 0 0 0.9914 -1 40 18 evt_16 0 0 0 0.9979 -1 40 19 evt_10 0 0 0 0.9838 -1 40 20 evt_22 0 0 0 0.9921 -1 40 21 evt_13 0 0 0 0.9972 -1 40 22 evt_0 0 0 0 0.9813 -1 40 23 evt_8 0 0 0 0.9977 -1 41 0 evt_7 0 0 0 0.9968 -1 41 1 evt_5 0 0 0 0.9968 -1 41 2 evt_23 0 0 0 0.9934 -1 41 3 evt_12 0 0 0 0.9961 -1 41 4 evt_3 0 0 0 0.9929 -1 41 5 evt_2 0 0 0 0.9985 -1 41 6 evt_14 0 0 0 0.986 -1 41 7 evt_18 0 0 0 0.998 -1 41 8 evt_10 0 0 0 0.9959 -1 41 9 evt_1 0 0 0 0.9853 -1 41 10 evt_22 0 0 0 0.9933 -1 41 11 evt_19 0 0 0 0.9905 -1 41 12 evt_4 0 0 0 0.9986 -1 41 13 evt_17 0 0 0 0.9944 -1 41 14 evt_20 0 0 0 0.9909 -1 41 15 evt_6 0 0 0 0.9705 -1 41 16 evt_13 0 0 0 0.9975 -1 41 17 evt_21 0 0 0 0.9888 -1 41 18 evt_15 0 0 0 0.9972 -1 41 19 evt_0 0 0 0 0.9798 -1 41 20 evt_11 0 0 0 0.9943 -1 41 21 evt_9 0 0 0 0.9939 -1 41 22 evt_16 0 0 0 0.9983 -1 41 23 evt_8 0 0 0 0.9978 -1 42 0 evt_18 0 0 0 0.9983 -1 42 1 evt_23 0 0 0 0.9895 -1 42 2 evt_22 0 0 0 0.9932 -1 42 3 evt_2 0 0 0 0.9983 -1 42 4 evt_6 0 0 0 0.9759 -1 42 5 evt_5 0 0 0 0.9968 -1 42 6 evt_0 0 0 0 0.9805 -1 42 7 evt_9 0 0 0 0.9941 -1 42 8 evt_3 0 0 0 0.9945 -1 42 9 evt_16 0 0 0 0.9982 -1 42 10 evt_17 0 0 0 0.9948 -1 42 11 evt_4 0 0 0 0.9887 -1 42 12 evt_13 0 0 0 0.9984 -1 42 13 evt_15 0 0 0 0.9977 -1 42 14 evt_8 0 0 0 0.9978 -1 42 15 
evt_12 0 0 0 0.9966 -1 42 16 evt_20 0 0 0 0.992 -1 42 17 evt_19 0 0 0 0.9905 -1 42 18 evt_1 0 0 0 0.9903 -1 42 19 evt_14 0 0 0 0.9867 -1 42 20 evt_10 0 0 0 0.9958 -1 42 21 evt_7 0 0 0 0.998 -1 42 22 evt_11 0 0 0 0.9945 -1 42 23 evt_21 0 0 0 0.9911
diff --git a/tensor/cmd/ttail/testdata/actual_csv.csv b/tensor/cmd/ttail/testdata/actual_csv.csv
deleted file mode 100644
index 94b7374ef7..0000000000
--- a/tensor/cmd/ttail/testdata/actual_csv.csv
+++ /dev/null
@@ -1,51 +0,0 @@
-_H:,|Run,$Params,#FirstZero,#SSE,#AvgSSE,#PctErr,#PctCor,#CosDiff,#Test CorrECs,#Test CosDiffIap,#Test CosDiffIbp,#Test CosDiffCap,#Test CosDiffCbp,#Test Rcll,#Test Fami,#Lure CorrECs,#Lure CosDiffIap,#Lure CosDiffIbp,#Lure CosDiffCap,#Lure CosDiffCbp,#Lure Rcll,#Lure Fami
-_D:,0,Base,-1,4433,0.1885,1,0,0.3327,0.8324,0.1602,0.1399,0.04461,0.1373,0.675,0.525,0.8136,0.06404,0.09654,0.01296,0.1022,0.425,0.375
-_D:,1,Base,-1,4458,0.1895,1,0,0.3199,0.858,0.1721,0.2119,0.07375,0.1716,0.8,0.675,0.833,0.00141,0.01682,0.04151,0.08044,0.7,0.2
-_D:,2,Base,-1,4410,0.1875,1,0,0.3402,0.8514,0.2204,0.1827,0.1035,0.19,0.775,0.7,0.8104,0.07604,0.04317,0.06058,0.148,0.375,0.225
-_D:,3,Base,-1,4402,0.1872,1,0,0.3352,0.834,0.1351,0.1484,0.1551,0.1437,0.7,0.525,0.8127,-0.01961,0.01865,0.07977,0.1305,0.5,0.15
-_D:,4,Base,-1,4796,0.2039,1,0,0.2744,0.8494,0.1388,0.07558,0.09007,0.147,0.825,0.5,0.8181,0.05024,-0.002732,0.0384,0.108,0.475,0.275
-_D:,5,Base,-1,4517,0.192,1,0,0.3121,0.8544,0.0733,0.1196,0.1393,0.07325,0.75,0.5,0.8428,0.0003165,0.004615,0.1277,0.06059,0.8,0.125
-_D:,6,Base,-1,4651,0.1977,1,0,0.2901,0.8502,0.1421,0.1121,0.1594,0.1853,0.75,0.475,0.8453,0.04084,0.03057,0.1617,0.1808,0.725,0.225
-_D:,7,Base,-1,4423,0.188,1,0,0.3385,0.8485,0.1486,0.1449,0.1315,0.1148,0.775,0.55,0.8224,-0.03416,0.05164,0.05215,0.07778,0.5,0.25
-_D:,8,Base,-1,4570,0.1943,1,0,0.3159,0.8561,0.131,0.1974,0.1246,0.1336,0.8,0.675,0.8246,0.01257,0.01026,0.08611,0.1148,0.6,0.175
-_D:,9,Base,-1,4368,0.1857,1,0,0.3439,0.8428,0.2023,0.1534,0.0626,0.1635,0.7,0.575,0.8038,0.01742,0.03507,0.05153,0.1214,0.375,0.225
-_D:,10,Base,-1,4471,0.1901,1,0,0.3298,0.8309,0.08743,0.09313,0.09926,0.1483,0.6,0.425,0.8112,0.03346,0.002541,0.08583,0.0989,0.45,0.175
-_D:,11,Base,-1,4240,0.1803,1,0,0.3546,0.8416,0.11,0.09536,0.06674,0.1161,0.75,0.375,0.8145,0.01458,0.006233,0.07063,0.08857,0.475,0.1
-_D:,12,Base,-1,4389,0.1866,1,0,0.3482,0.8305,0.1221,0.1239,0.06907,0.14,0.625,0.575,0.7966,0.02257,0.02086,0.009412,0.07069,0.25,0.175
-_D:,13,Base,-1,4550,0.1934,1,0,0.3131,0.8368,0.1861,0.1736,0.1326,0.07795,0.65,0.675,0.8039,0.03906,0.01382,0.116,0.08777,0.35,0.25
-_D:,14,Base,-1,4403,0.1872,1,0,0.3325,0.8332,0.1536,0.1206,0.1003,0.1366,0.65,0.55,0.8057,0.031,0.03315,0.09155,0.1314,0.375,0.275
-_D:,15,Base,-1,4496,0.1912,1,0,0.3088,0.8602,0.1551,0.1861,0.1243,0.1242,0.825,0.65,0.8261,0.04527,0.0554,0.06897,0.07225,0.7,0.25
-_D:,16,Base,-1,4292,0.1825,1,0,0.3573,0.8364,0.1541,0.1811,0.06818,0.1442,0.625,0.6,0.8094,0.02545,0.0319,0.04543,0.1378,0.425,0.175
-_D:,17,Base,-1,4635,0.1971,1,0,0.3065,0.84,0.06302,0.1168,0.1048,0.2138,0.65,0.5,0.8139,0.03731,0.03213,0.09163,0.1915,0.425,0.3
-_D:,18,Base,-1,4472,0.1901,1,0,0.3285,0.8488,0.1601,0.1733,0.1122,0.1599,0.825,0.5,0.824,0.02248,0.02425,0.09422,0.1235,0.525,0.225
-_D:,19,Base,-1,4639,0.1973,1,0,0.3175,0.8422,0.1134,0.1369,0.09992,0.2229,0.625,0.475,0.8276,0.04992,0.03533,0.08638,0.1812,0.525,0.2
-_D:,20,Base,-1,4202,0.1787,1,0,0.3621,0.8411,0.142,0.1896,0.07698,0.1464,0.725,0.625,0.798,0.01359,0.03396,0.04937,0.1454,0.225,0.175
-_D:,21,Base,-1,4293,0.1825,1,0,0.3517,0.824,0.1187,0.0874,0.07912,0.151,0.6,0.4,0.7899,-0.01185,0.02314,0.04036,0.1012,0.225,0.125 -_D:,22,Base,-1,4359,0.1853,1,0,0.3471,0.8333,0.1297,0.05905,0.1204,0.1104,0.675,0.375,0.8115,-0.03251,0.003859,0.0779,0.09754,0.4,0.1 -_D:,23,Base,-1,4729,0.2011,1,0,0.2856,0.8407,0.1146,0.1772,0.1462,0.1292,0.6,0.5,0.8194,0.03615,0.0456,0.08731,0.05294,0.5,0.225 -_D:,24,Base,-1,4694,0.1996,1,0,0.2955,0.8511,0.1584,0.164,0.1114,0.161,0.775,0.725,0.81,0.004854,0.02887,0.002941,0.1263,0.425,0.25 -_D:,25,Base,-1,4407,0.1874,1,0,0.3388,0.8424,0.1797,0.1757,0.1199,0.1246,0.675,0.575,0.8122,0.008896,0.01536,0.1022,0.09875,0.475,0.225 -_D:,26,Base,-1,4308,0.1831,1,0,0.3579,0.8345,0.196,0.1249,0.06044,0.173,0.575,0.6,0.8026,0.03943,0.006154,0.04586,0.124,0.4,0.2 -_D:,27,Base,-1,4519,0.1921,1,0,0.3197,0.8443,0.1696,0.1383,0.144,0.1449,0.675,0.6,0.8116,0.03426,0.02072,0.1077,0.0631,0.375,0.275 -_D:,28,Base,-1,4350,0.185,1,0,0.3396,0.8516,0.1516,0.1326,0.1162,0.2181,0.75,0.625,0.8256,0.009512,0.05011,0.08527,0.1867,0.65,0.2 -_D:,29,Base,-1,4423,0.188,1,0,0.3333,0.8198,0.1035,0.1131,0.07251,0.1276,0.425,0.475,0.7992,0.03496,0.03105,0.07669,0.08504,0.275,0.325 -_D:,30,Base,-1,4452,0.1893,1,0,0.3229,0.8577,0.1288,0.1333,0.1229,0.1506,0.9,0.55,0.8258,0.03333,0.04937,0.05749,0.09554,0.6,0.35 -_D:,31,Base,-1,4363,0.1855,1,0,0.3466,0.8482,0.1561,0.1215,0.1209,0.2282,0.775,0.575,0.8196,0.009875,-0.004951,0.08399,0.1954,0.4,0.2 -_D:,32,Base,-1,4597,0.1955,1,0,0.3055,0.8465,0.1402,0.1327,0.12,0.1194,0.825,0.625,0.8189,-0.01928,0.06341,0.0639,0.07616,0.425,0.325 -_D:,33,Base,-1,4434,0.1885,1,0,0.3275,0.8571,0.2025,0.1831,0.08824,0.1123,0.85,0.575,0.8178,0.04442,0.03104,0.04301,0.06298,0.475,0.25 -_D:,34,Base,-1,4616,0.1963,1,0,0.3006,0.8558,0.1647,0.1398,0.1796,0.2341,0.875,0.5,0.8396,0.03554,0.03326,0.1462,0.1792,0.675,0.2 -_D:,35,Base,-1,4426,0.1882,1,0,0.3235,0.8436,0.1509,0.1471,0.0837,0.1681,0.775,0.575,0.8187,0.02911,-0.001897,0.07189,0.1142,0.5,0.275 -_D:,36,Base,-1,4533,0.1927,1,0,0.3094,0.8537,0.1666,0.1702,0.1489,0.228,0.775,0.7,0.8224,0.05308,0.05666,0.1101,0.2082,0.5,0.3 -_D:,37,Base,-1,4464,0.1898,1,0,0.3153,0.8667,0.1743,0.1973,0.1664,0.1662,0.9,0.625,0.8476,0.03638,0.04205,0.1345,0.1564,0.825,0.25 -_D:,38,Base,-1,4112,0.1748,1,0,0.3812,0.8327,0.2007,0.2045,0.06513,0.1594,0.625,0.7,0.7898,0.01621,0.05661,0.05103,0.1129,0.275,0.2 -_D:,39,Base,-1,4782,0.2033,1,0,0.2729,0.8522,0.1081,0.112,0.1335,0.1591,0.825,0.45,0.8296,0.04202,0.03618,0.09702,0.1261,0.675,0.325 -_D:,40,Base,-1,4509,0.1917,1,0,0.3116,0.8559,0.1539,0.1508,0.1395,0.1217,0.85,0.625,0.8221,0.05025,0.03958,0.06731,0.09753,0.65,0.3 -_D:,41,Base,-1,4482,0.1905,1,0,0.3271,0.8283,0.168,0.1385,0.03715,0.1241,0.6,0.65,0.7962,0.03367,0.06251,0.02074,0.1349,0.25,0.25 -_D:,42,Base,-1,4559,0.1938,1,0,0.3216,0.8646,0.1235,0.1606,0.1489,0.1948,0.875,0.575,0.8368,0.01239,0.02018,0.1296,0.1843,0.675,0.25 -_D:,43,Base,-1,4489,0.1908,1,0,0.3151,0.851,0.1277,0.1647,0.09662,0.1045,0.8,0.525,0.822,-0.001035,-0.004696,0.09362,0.03887,0.475,0.225 -_D:,44,Base,-1,4285,0.1822,1,0,0.3524,0.8434,0.2241,0.1854,0.06867,0.1585,0.75,0.625,0.8054,0.0504,0.03446,0.01862,0.1473,0.4,0.275 -_D:,45,Base,-1,4392,0.1867,1,0,0.3426,0.8344,0.1922,0.1574,0.04894,0.141,0.65,0.7,0.7938,-0.01434,0.01392,0.02484,0.1188,0.15,0.15 -_D:,46,Base,-1,4283,0.1821,1,0,0.3602,0.8261,0.1468,0.1513,0.1184,0.1581,0.625,0.5,0.8081,0.01841,-0.00589,0.1058,0.1442,0.375,0.275 
-_D:,47,Base,-1,4356,0.1852,1,0,0.3369,0.8438,0.07923,0.05698,0.1751,0.2076,0.675,0.375,0.8237,-0.004864,-0.0002041,0.1153,0.1859,0.55,0.15 -_D:,48,Base,-1,4181,0.1778,1,0,0.3769,0.8249,0.1296,0.1647,0.07875,0.1398,0.45,0.5,0.7936,-0.006336,0.01134,0.04809,0.1174,0.3,0.15 -_D:,49,Base,-1,4475,0.1903,1,0,0.325,0.8424,0.1136,0.1021,0.1535,0.1994,0.7,0.45,0.8289,0.03519,0.06062,0.09485,0.1643,0.625,0.275 diff --git a/tensor/cmd/ttail/ttail.go b/tensor/cmd/ttail/ttail.go deleted file mode 100644 index 5690530868..0000000000 --- a/tensor/cmd/ttail/ttail.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "fmt" - "log" - "os" - "time" - - "github.com/nsf/termbox-go" -) - -func main() { - err := termbox.Init() - if err != nil { - log.Println(err) - panic(err) - } - defer termbox.Close() - - TheFiles.Open(os.Args[1:]) - - nf := len(TheFiles) - if nf == 0 { - fmt.Printf("usage: etail ... (space separated)\n") - return - } - - if nf > 1 { - TheTerm.ShowFName = true - } - - err = TheTerm.ToggleTail() // start in tail mode - if err != nil { - log.Println(err) - panic(err) - } - - Tailer := time.NewTicker(time.Duration(500) * time.Millisecond) - go func() { - for { - <-Tailer.C - TheTerm.TailCheck() - } - }() - -loop: - for { - switch ev := termbox.PollEvent(); ev.Type { - case termbox.EventKey: - switch { - case ev.Key == termbox.KeyEsc || ev.Ch == 'Q' || ev.Ch == 'q': - break loop - case ev.Ch == ' ' || ev.Ch == 'n' || ev.Ch == 'N' || ev.Key == termbox.KeyPgdn || ev.Key == termbox.KeySpace: - TheTerm.NextPage() - case ev.Ch == 'p' || ev.Ch == 'P' || ev.Key == termbox.KeyPgup: - TheTerm.PrevPage() - case ev.Key == termbox.KeyArrowDown: - TheTerm.NextLine() - case ev.Key == termbox.KeyArrowUp: - TheTerm.PrevLine() - case ev.Ch == 'f' || ev.Ch == 'F' || ev.Key == termbox.KeyArrowRight: - TheTerm.ScrollRight() - case ev.Ch == 'b' || ev.Ch == 'B' || ev.Key == termbox.KeyArrowLeft: - TheTerm.ScrollLeft() - case ev.Ch == 'a' || ev.Ch == 'A' || ev.Key == termbox.KeyHome: - TheTerm.Top() - case ev.Ch == 'e' || ev.Ch == 'E' || ev.Key == termbox.KeyEnd: - TheTerm.End() - case ev.Ch == 'w' || ev.Ch == 'W': - TheTerm.FixRight() - case ev.Ch == 's' || ev.Ch == 'S': - TheTerm.FixLeft() - case ev.Ch == 'v' || ev.Ch == 'V': - TheTerm.FilesNext() - case ev.Ch == 'u' || ev.Ch == 'U': - TheTerm.FilesPrev() - case ev.Ch == 'm' || ev.Ch == 'M': - TheTerm.MoreMinLines() - case ev.Ch == 'l' || ev.Ch == 'L': - TheTerm.LessMinLines() - case ev.Ch == 'd' || ev.Ch == 'D': - TheTerm.ToggleNames() - case ev.Ch == 't' || ev.Ch == 'T': - TheTerm.ToggleTail() - case ev.Ch == 'c' || ev.Ch == 'C': - TheTerm.ToggleColNums() - case ev.Ch == 'h' || ev.Ch == 'H': - TheTerm.Help() - } - case termbox.EventResize: - TheTerm.Draw() - } - } -} diff --git a/tensor/databrowser/browser.go b/tensor/databrowser/browser.go deleted file mode 100644 index 21d7cb268a..0000000000 --- a/tensor/databrowser/browser.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package databrowser - -//go:generate core generate - -import ( - "io/fs" - "path/filepath" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/base/fsx" - "cogentcore.org/core/core" - "cogentcore.org/core/events" - "cogentcore.org/core/filetree" - "cogentcore.org/core/icons" - "cogentcore.org/core/styles" - "cogentcore.org/core/tree" - "cogentcore.org/core/types" -) - -// Browser is a data browser, for browsing data either on an os filesystem -// or as a datafs virtual data filesystem. -type Browser struct { - core.Frame - - // FS is the filesystem, if browsing an FS - FS fs.FS - - // DataRoot is the path to the root of the data to browse - DataRoot string - - toolbar *core.Toolbar - splits *core.Splits - files *filetree.Tree - tabs *core.Tabs -} - -// Init initializes with the data and script directories -func (br *Browser) Init() { - br.Frame.Init() - br.Styler(func(s *styles.Style) { - s.Grow.Set(1, 1) - }) - - br.OnShow(func(e events.Event) { - br.UpdateFiles() - }) - - tree.AddChildAt(br, "splits", func(w *core.Splits) { - br.splits = w - w.SetSplits(.15, .85) - tree.AddChildAt(w, "fileframe", func(w *core.Frame) { - w.Styler(func(s *styles.Style) { - s.Direction = styles.Column - s.Overflow.Set(styles.OverflowAuto) - s.Grow.Set(1, 1) - }) - tree.AddChildAt(w, "filetree", func(w *filetree.Tree) { - br.files = w - w.FileNodeType = types.For[FileNode]() - // w.OnSelect(func(e events.Event) { - // e.SetHandled() - // sels := w.SelectedViews() - // if sels != nil { - // br.FileNodeSelected(sn) - // } - // }) - }) - }) - tree.AddChildAt(w, "tabs", func(w *core.Tabs) { - br.tabs = w - w.Type = core.FunctionalTabs - }) - }) -} - -// NewBrowserWindow opens a new data Browser for given -// file system (nil for os files) and data directory. -func NewBrowserWindow(fsys fs.FS, dataDir string) *Browser { - b := core.NewBody("Cogent Data Browser: " + fsx.DirAndFile(dataDir)) - br := NewBrowser(b) - br.FS = fsys - ddr := dataDir - if fsys == nil { - ddr = errors.Log1(filepath.Abs(dataDir)) - } - b.AddTopBar(func(bar *core.Frame) { - tb := core.NewToolbar(bar) - br.toolbar = tb - tb.Maker(br.MakeToolbar) - }) - br.SetDataRoot(ddr) - b.RunWindow() - return br -} - -// ParentBrowser returns the Browser parent of given node -func ParentBrowser(tn tree.Node) *Browser { - var res *Browser - tn.AsTree().WalkUp(func(n tree.Node) bool { - if c, ok := n.(*Browser); ok { - res = c - return false - } - return true - }) - return res -} - -// UpdateFiles Updates the files list. -func (br *Browser) UpdateFiles() { //types:add - files := br.files - if br.FS != nil { - files.SortByModTime = true - files.OpenPathFS(br.FS, br.DataRoot) - } else { - files.OpenPath(br.DataRoot) - } - br.Update() -} - -func (br *Browser) MakeToolbar(p *tree.Plan) { - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(br.UpdateFiles).SetText("").SetIcon(icons.Refresh).SetShortcut("Command+U") - }) -} diff --git a/tensor/databrowser/datatab.go b/tensor/databrowser/datatab.go deleted file mode 100644 index 285a5c6a81..0000000000 --- a/tensor/databrowser/datatab.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package databrowser - -import ( - "cogentcore.org/core/core" - "cogentcore.org/core/plot/plotcore" - "cogentcore.org/core/styles" - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/table" - "cogentcore.org/core/tensor/tensorcore" - "cogentcore.org/core/texteditor" -) - -// NewTab creates a tab with given label, or returns the existing one -// with given type of widget within it. mkfun function is called to create -// and configure a new widget if not already existing. -func NewTab[T any](br *Browser, label string, mkfun func(tab *core.Frame) T) T { - tab := br.tabs.RecycleTab(label) - if tab.HasChildren() { - return tab.Child(1).(T) - } - w := mkfun(tab) - return w -} - -// NewTabTensorTable creates a tab with a tensorcore.Table widget -// to view given table.Table, using its own table.IndexView as tv.Table. -// Use tv.Table.Table to get the underlying *table.Table -// Use tv.Table.Sequential to update the IndexView to view -// all of the rows when done updating the Table, and then call br.Update() -func (br *Browser) NewTabTensorTable(label string, dt *table.Table) *tensorcore.Table { - tv := NewTab[*tensorcore.Table](br, label, func(tab *core.Frame) *tensorcore.Table { - tb := core.NewToolbar(tab) - tv := tensorcore.NewTable(tab) - tb.Maker(tv.MakeToolbar) - return tv - }) - tv.SetTable(dt) - br.Update() - return tv -} - -// NewTabTensorEditor creates a tab with a tensorcore.TensorEditor widget -// to view given Tensor. -func (br *Browser) NewTabTensorEditor(label string, tsr tensor.Tensor) *tensorcore.TensorEditor { - tv := NewTab[*tensorcore.TensorEditor](br, label, func(tab *core.Frame) *tensorcore.TensorEditor { - tb := core.NewToolbar(tab) - tv := tensorcore.NewTensorEditor(tab) - tb.Maker(tv.MakeToolbar) - return tv - }) - tv.SetTensor(tsr) - br.Update() - return tv -} - -// NewTabTensorGrid creates a tab with a tensorcore.TensorGrid widget -// to view given Tensor. -func (br *Browser) NewTabTensorGrid(label string, tsr tensor.Tensor) *tensorcore.TensorGrid { - tv := NewTab[*tensorcore.TensorGrid](br, label, func(tab *core.Frame) *tensorcore.TensorGrid { - // tb := core.NewToolbar(tab) - tv := tensorcore.NewTensorGrid(tab) - // tb.Maker(tv.MakeToolbar) - return tv - }) - tv.SetTensor(tsr) - br.Update() - return tv -} - -// NewTabPlot creates a tab with a Plot of given table.Table. -func (br *Browser) NewTabPlot(label string, dt *table.Table) *plotcore.PlotEditor { - pl := NewTab[*plotcore.PlotEditor](br, label, func(tab *core.Frame) *plotcore.PlotEditor { - return plotcore.NewSubPlot(tab) - }) - pl.SetTable(dt) - br.Update() - return pl -} - -// NewTabSliceTable creates a tab with a core.Table widget -// to view the given slice of structs. -func (br *Browser) NewTabSliceTable(label string, slc any) *core.Table { - tv := NewTab[*core.Table](br, label, func(tab *core.Frame) *core.Table { - return core.NewTable(tab) - }) - tv.SetSlice(slc) - br.Update() - return tv -} - -// NewTabEditor opens a texteditor.Editor tab, displaying given string. 
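
For orientation, a minimal sketch of how the tab helpers above were typically combined with NewBrowserWindow from browser.go earlier in this diff. The data directory and file name are hypothetical, error handling is abbreviated, and this assumes only the APIs shown in this patch (NewBrowserWindow, NewTabTensorTable, NewTabPlot, NewTabEditor, table.OpenCSV):

package main

import (
	"cogentcore.org/core/core"
	"cogentcore.org/core/tensor/databrowser"
	"cogentcore.org/core/tensor/table"
)

func main() {
	// Browse the OS filesystem (nil FS) rooted at a hypothetical ./data directory.
	br := databrowser.NewBrowserWindow(nil, "./data")

	// Load a hypothetical tab-separated log and show it as a table and a plot tab.
	dt := table.NewTable("results")
	if err := dt.OpenCSV("./data/results.tsv", table.Tab); err == nil {
		br.NewTabTensorTable("results", dt)
		br.NewTabPlot("results Plot", dt)
	}

	// Plain text content can go in a texteditor tab.
	br.NewTabEditor("notes", "free-form notes about this run")
	core.Wait()
}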
-func (br *Browser) NewTabEditor(label, content string) *texteditor.Editor { - ed := NewTab[*texteditor.Editor](br, label, func(tab *core.Frame) *texteditor.Editor { - ed := texteditor.NewEditor(tab) - ed.Styler(func(s *styles.Style) { - s.Grow.Set(1, 1) - }) - return ed - }) - if content != "" { - ed.Buffer.SetText([]byte(content)) - } - br.Update() - return ed -} - -// NewTabEditorFile opens an editor tab for given file -func (br *Browser) NewTabEditorFile(label, filename string) *texteditor.Editor { - ed := NewTab[*texteditor.Editor](br, label, func(tab *core.Frame) *texteditor.Editor { - ed := texteditor.NewEditor(tab) - ed.Styler(func(s *styles.Style) { - s.Grow.Set(1, 1) - }) - return ed - }) - ed.Buffer.Open(core.Filename(filename)) - br.Update() - return ed -} diff --git a/tensor/databrowser/filetree.go b/tensor/databrowser/filetree.go deleted file mode 100644 index ad289ac312..0000000000 --- a/tensor/databrowser/filetree.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package databrowser - -import ( - "image" - "log" - "reflect" - "strings" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/base/fileinfo" - "cogentcore.org/core/base/fsx" - "cogentcore.org/core/core" - "cogentcore.org/core/events" - "cogentcore.org/core/filetree" - "cogentcore.org/core/icons" - "cogentcore.org/core/styles" - "cogentcore.org/core/styles/states" - "cogentcore.org/core/tensor/datafs" - "cogentcore.org/core/tensor/table" - "cogentcore.org/core/texteditor/diffbrowser" -) - -// FileNode is databrowser version of FileNode for FileTree -type FileNode struct { - filetree.Node -} - -func (fn *FileNode) Init() { - fn.Node.Init() - fn.AddContextMenu(fn.ContextMenu) -} - -func (fn *FileNode) WidgetTooltip(pos image.Point) (string, image.Point) { - res := fn.Tooltip - if fn.Info.Cat == fileinfo.Data { - ofn := fn.AsNode() - switch fn.Info.Known { - case fileinfo.Number, fileinfo.String: - dv := DataFS(ofn) - v, _ := dv.AsString() - if res != "" { - res += " " - } - res += v - } - } - return res, fn.DefaultTooltipPos() -} - -// DataFS returns the datafs representation of this item. -// returns nil if not a dataFS item. 
-func DataFS(fn *filetree.Node) *datafs.Data { - dfs, ok := fn.FileRoot.FS.(*datafs.Data) - if !ok { - return nil - } - dfi, err := dfs.Stat(string(fn.Filepath)) - if errors.Log(err) != nil { - return nil - } - return dfi.(*datafs.Data) -} - -func (fn *FileNode) GetFileInfo() error { - err := fn.InitFileInfo() - if fn.FileRoot.FS == nil { - return err - } - d := DataFS(fn.AsNode()) - if d != nil { - fn.Info.Known = d.KnownFileInfo() - fn.Info.Cat = fileinfo.Data - switch fn.Info.Known { - case fileinfo.Tensor: - fn.Info.Ic = icons.BarChart - case fileinfo.Table: - fn.Info.Ic = icons.BarChart4Bars - case fileinfo.Number: - fn.Info.Ic = icons.Tag - case fileinfo.String: - fn.Info.Ic = icons.Title - default: - fn.Info.Ic = icons.BarChart - } - } - return err -} - -func (fn *FileNode) OpenFile() error { - ofn := fn.AsNode() - br := ParentBrowser(fn.This) - if br == nil { - return nil - } - df := fsx.DirAndFile(string(fn.Filepath)) - switch { - case fn.Info.Cat == fileinfo.Data: - switch fn.Info.Known { - case fileinfo.Tensor: - d := DataFS(ofn) - tsr := d.AsTensor() - if tsr.IsString() || tsr.DataType() < reflect.Float32 { - br.NewTabTensorEditor(df, tsr) - } else { - br.NewTabTensorGrid(df, tsr) - } - case fileinfo.Table: - d := DataFS(ofn) - dt := d.AsTable() - br.NewTabTensorTable(df, dt) - br.Update() - case fileinfo.Number: - dv := DataFS(ofn) - v, _ := dv.AsFloat32() - d := core.NewBody(df) - core.NewText(d).SetType(core.TextSupporting).SetText(df) - sp := core.NewSpinner(d).SetValue(v) - d.AddBottomBar(func(bar *core.Frame) { - d.AddCancel(bar) - d.AddOK(bar).OnClick(func(e events.Event) { - dv.SetFloat32(sp.Value) - }) - }) - d.RunDialog(br) - case fileinfo.String: - dv := DataFS(ofn) - v, _ := dv.AsString() - d := core.NewBody(df) - core.NewText(d).SetType(core.TextSupporting).SetText(df) - tf := core.NewTextField(d).SetText(v) - d.AddBottomBar(func(bar *core.Frame) { - d.AddCancel(bar) - d.AddOK(bar).OnClick(func(e events.Event) { - dv.SetString(tf.Text()) - }) - }) - d.RunDialog(br) - - default: - dt := table.NewTable() - err := dt.OpenCSV(fn.Filepath, table.Tab) // todo: need more flexible data handling mode - if err != nil { - core.ErrorSnackbar(br, err) - } else { - br.NewTabTensorTable(df, dt) - } - } - case fn.IsExec(): // todo: use exec? 
- fn.OpenFilesDefault() - case fn.Info.Cat == fileinfo.Video: // todo: use our video viewer - fn.OpenFilesDefault() - case fn.Info.Cat == fileinfo.Audio: // todo: use our audio viewer - fn.OpenFilesDefault() - case fn.Info.Cat == fileinfo.Image: // todo: use our image viewer - fn.OpenFilesDefault() - case fn.Info.Cat == fileinfo.Model: // todo: use xyz - fn.OpenFilesDefault() - case fn.Info.Cat == fileinfo.Sheet: // todo: use our spreadsheet :) - fn.OpenFilesDefault() - case fn.Info.Cat == fileinfo.Bin: // don't edit - fn.OpenFilesDefault() - case fn.Info.Cat == fileinfo.Archive || fn.Info.Cat == fileinfo.Backup: // don't edit - fn.OpenFilesDefault() - default: - br.NewTabEditor(df, string(fn.Filepath)) - } - return nil -} - -// EditFiles calls EditFile on selected files -func (fn *FileNode) EditFiles() { //types:add - fn.SelectedFunc(func(sn *filetree.Node) { - sn.This.(*FileNode).EditFile() - }) -} - -// EditFile pulls up this file in a texteditor -func (fn *FileNode) EditFile() { - if fn.IsDir() { - log.Printf("FileNode Edit -- cannot view (edit) directories!\n") - return - } - br := ParentBrowser(fn.This) - if br == nil { - return - } - if fn.Info.Cat == fileinfo.Data { - fn.OpenFile() - return - } - df := fsx.DirAndFile(string(fn.Filepath)) - br.NewTabEditor(df, string(fn.Filepath)) -} - -// PlotFiles calls PlotFile on selected files -func (fn *FileNode) PlotFiles() { //types:add - fn.SelectedFunc(func(sn *filetree.Node) { - if sfn, ok := sn.This.(*FileNode); ok { - sfn.PlotFile() - } - }) -} - -// PlotFile pulls up this file in a texteditor. -func (fn *FileNode) PlotFile() { - br := ParentBrowser(fn.This) - if br == nil { - return - } - d := DataFS(fn.AsNode()) - df := fsx.DirAndFile(string(fn.Filepath)) - ptab := df + " Plot" - var dt *table.Table - switch { - case fn.IsDir(): - dt = d.DirTable(nil) - case fn.Info.Cat == fileinfo.Data: - switch fn.Info.Known { - case fileinfo.Tensor: - tsr := d.AsTensor() - dt = table.NewTable(df) - dt.Rows = tsr.DimSize(0) - rc := dt.AddIntColumn("Row") - for r := range dt.Rows { - rc.Values[r] = r - } - dt.AddColumn(tsr, fn.Name) - case fileinfo.Table: - dt = d.AsTable() - default: - dt = table.NewTable(df) - err := dt.OpenCSV(fn.Filepath, table.Tab) // todo: need more flexible data handling mode - if err != nil { - core.ErrorSnackbar(br, err) - dt = nil - } - } - } - if dt == nil { - return - } - pl := br.NewTabPlot(ptab, dt) - pl.Options.Title = df - // TODO: apply column and plot level options. - br.Update() -} - -// DiffDirs displays a browser with differences between two selected directories -func (fn *FileNode) DiffDirs() { //types:add - var da, db *filetree.Node - fn.SelectedFunc(func(sn *filetree.Node) { - if sn.IsDir() { - if da == nil { - da = sn - } else if db == nil { - db = sn - } - } - }) - if da == nil || db == nil { - core.MessageSnackbar(fn, "DiffDirs requires two selected directories") - return - } - NewDiffBrowserDirs(string(da.Filepath), string(db.Filepath)) -} - -// NewDiffBrowserDirs returns a new diff browser for files that differ -// within the two given directories. Excludes Job and .tsv data files. 
-func NewDiffBrowserDirs(pathA, pathB string) { - brow, b := diffbrowser.NewBrowserWindow() - brow.DiffDirs(pathA, pathB, func(fname string) bool { - if IsTableFile(fname) { - return true - } - if strings.HasPrefix(fname, "job.") || fname == "dbmeta.toml" { - return true - } - return false - }) - b.RunWindow() -} - -func IsTableFile(fname string) bool { - return strings.HasSuffix(fname, ".tsv") || strings.HasSuffix(fname, ".csv") -} - -func (fn *FileNode) ContextMenu(m *core.Scene) { - core.NewFuncButton(m).SetFunc(fn.EditFiles).SetText("Edit").SetIcon(icons.Edit). - Styler(func(s *styles.Style) { - s.SetState(!fn.HasSelection(), states.Disabled) - }) - core.NewFuncButton(m).SetFunc(fn.PlotFiles).SetText("Plot").SetIcon(icons.Edit). - Styler(func(s *styles.Style) { - s.SetState(!fn.HasSelection() || fn.Info.Cat != fileinfo.Data, states.Disabled) - }) - core.NewFuncButton(m).SetFunc(fn.DiffDirs).SetText("Diff Dirs").SetIcon(icons.Edit). - Styler(func(s *styles.Style) { - s.SetState(!fn.HasSelection() || !fn.IsDir(), states.Disabled) - }) -} diff --git a/tensor/databrowser/typegen.go b/tensor/databrowser/typegen.go deleted file mode 100644 index 7ffdb629a5..0000000000 --- a/tensor/databrowser/typegen.go +++ /dev/null @@ -1,31 +0,0 @@ -// Code generated by "core generate"; DO NOT EDIT. - -package databrowser - -import ( - "io/fs" - - "cogentcore.org/core/tree" - "cogentcore.org/core/types" -) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/tensor/databrowser.Browser", IDName: "browser", Doc: "Browser is a data browser, for browsing data either on an os filesystem\nor as a datafs virtual data filesystem.", Methods: []types.Method{{Name: "UpdateFiles", Doc: "UpdateFiles Updates the file picker with current files in DataRoot,", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}}, Embeds: []types.Field{{Name: "Frame"}}, Fields: []types.Field{{Name: "FS", Doc: "Filesystem, if browsing an FS"}, {Name: "DataRoot", Doc: "DataRoot is the path to the root of the data to browse"}, {Name: "toolbar"}}}) - -// NewBrowser returns a new [Browser] with the given optional parent: -// Browser is a data browser, for browsing data either on an os filesystem -// or as a datafs virtual data filesystem. -func NewBrowser(parent ...tree.Node) *Browser { return tree.New[Browser](parent...) 
} - -// SetFS sets the [Browser.FS]: -// Filesystem, if browsing an FS -func (t *Browser) SetFS(v fs.FS) *Browser { t.FS = v; return t } - -// SetDataRoot sets the [Browser.DataRoot]: -// DataRoot is the path to the root of the data to browse -func (t *Browser) SetDataRoot(v string) *Browser { t.DataRoot = v; return t } - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/tensor/databrowser.FileNode", IDName: "file-node", Doc: "FileNode is databrowser version of FileNode for FileTree", Methods: []types.Method{{Name: "EditFiles", Doc: "EditFiles calls EditFile on selected files", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "PlotFiles", Doc: "PlotFiles calls PlotFile on selected files", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "DiffDirs", Doc: "DiffDirs displays a browser with differences between two selected directories", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}}, Embeds: []types.Field{{Name: "Node"}}}) - -// NewFileNode returns a new [FileNode] with the given optional parent: -// FileNode is databrowser version of FileNode for FileTree -func NewFileNode(parent ...tree.Node) *FileNode { return tree.New[FileNode](parent...) } diff --git a/tensor/datafs/README.md b/tensor/datafs/README.md deleted file mode 100644 index c2054b8ab6..0000000000 --- a/tensor/datafs/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# datafs: a virtual filesystem for data - -TODO: write docs - -# Constraints - -* no pointers -- GPU does not like pointers -- use Set / As accessors -* names within directory must be unique - diff --git a/tensor/datafs/data.go b/tensor/datafs/data.go deleted file mode 100644 index 8c83a4d94d..0000000000 --- a/tensor/datafs/data.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package datafs - -import ( - "errors" - "reflect" - "time" - "unsafe" - - "cogentcore.org/core/base/fileinfo" - "cogentcore.org/core/base/metadata" - "cogentcore.org/core/base/reflectx" - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/table" -) - -// Data is a single item of data, the "file" or "directory" in the data filesystem. -type Data struct { - // Parent is the parent data directory - Parent *Data - - // name is the name of this item. it is not a path. - name string - - // modTime tracks time added to directory, used for ordering. - modTime time.Time - - // Meta has metadata, including standardized support for - // plotting options, compute functions. - Meta metadata.Data - - // Value is the underlying value of data; - // is a map[string]*Data for directories. - Value any -} - -// NewData returns a new Data item in given directory Data item, -// which can be nil. If not a directory, an error will be generated. -// The modTime is automatically set to now, and can be used for sorting -// by order created. The name must be unique within parent. -func NewData(dir *Data, name string) (*Data, error) { - d := &Data{Parent: dir, name: name, modTime: time.Now()} - var err error - if dir != nil { - err = dir.Add(d) - } - return d, err -} - -// New adds new data item(s) of given basic type to given directory, -// with given name(s) (at least one is required). -// Values are initialized to zero value for type. -// All names must be unique in the directory. -// Returns the first item created, for immediate use of one value. 
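
As a quick orientation to the constructors being removed here, this is a minimal sketch of building a small datafs tree, closely following the fs_test.go fixture and the datafs-sim example later in this diff; the directory and item names and the tensor size are illustrative only:

package main

import (
	"cogentcore.org/core/base/errors"
	"cogentcore.org/core/tensor/datafs"
)

func main() {
	root := errors.Log1(datafs.NewDir("root")) // top-level directory

	cfg := errors.Log1(root.Mkdir("Config"))
	errors.Log1(datafs.New[int](cfg, "NRun", "NEpoch")) // scalar items, zero-initialized
	cfg.Item("NRun").SetInt(5)

	net := errors.Log1(root.Mkdir("network"))
	errors.Log1(datafs.NewTensor[float32](net, "units", []int{50, 50})) // 50x50 tensor

	logd := errors.Log1(root.Mkdir("log"))
	errors.Log1(datafs.NewTable(logd, "Trial")) // empty table.Table named "Trial"
}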
-func New[T any](dir *Data, names ...string) (*Data, error) { - if len(names) == 0 { - err := errors.New("datafs.New requires at least 1 name") - return nil, err - } - var r *Data - var errs []error - for _, nm := range names { - var v T - d, err := NewData(dir, nm) - if err != nil { - errs = append(errs, err) - continue - } - d.Value = v - if r == nil { - r = d - } - } - return r, errors.Join(errs...) -} - -// NewTensor returns a new Tensor of given data type, shape sizes, -// and optional dimension names, in given directory Data item. -// The name must be unique in the directory. -func NewTensor[T string | bool | float32 | float64 | int | int32 | byte](dir *Data, name string, sizes []int, names ...string) (tensor.Tensor, error) { - tsr := tensor.New[T](sizes, names...) - d, err := NewData(dir, name) - d.Value = tsr - return tsr, err -} - -// NewTable makes new table.Table(s) in given directory, -// for given name(s) (at least one is required). -// All names must be unique in the directory. -// Returns the first table created, for immediate use of one item. -func NewTable(dir *Data, names ...string) (*table.Table, error) { - if len(names) == 0 { - err := errors.New("datafs.New requires at least 1 name") - return nil, err - } - var r *table.Table - var errs []error - for _, nm := range names { - t := table.NewTable(nm) - d, err := NewData(dir, nm) - if err != nil { - errs = append(errs, err) - continue - } - d.Value = t - if r == nil { - r = t - } - } - return r, errors.Join(errs...) -} - -/////////////////////////////// -// Data Access - -// IsNumeric returns true if the [DataType] is a basic scalar -// numerical value, e.g., float32, int, etc. -func (d *Data) IsNumeric() bool { - return reflectx.KindIsNumber(d.DataType()) -} - -// DataType returns the type of the data elements in the tensor. -// Bool is returned for the Bits tensor type. -func (d *Data) DataType() reflect.Kind { - if d.Value == nil { - return reflect.Invalid - } - return reflect.TypeOf(d.Value).Kind() -} - -func (d *Data) KnownFileInfo() fileinfo.Known { - if tsr := d.AsTensor(); tsr != nil { - return fileinfo.Tensor - } - kind := d.DataType() - if reflectx.KindIsNumber(kind) { - return fileinfo.Number - } - if kind == reflect.String { - return fileinfo.String - } - return fileinfo.Unknown -} - -// AsTensor returns the data as a tensor if it is one, else nil. -func (d *Data) AsTensor() tensor.Tensor { - tsr, _ := d.Value.(tensor.Tensor) - return tsr -} - -// AsTable returns the data as a table if it is one, else nil. -func (d *Data) AsTable() *table.Table { - dt, _ := d.Value.(*table.Table) - return dt -} - -// AsFloat64 returns data as a float64 if it is a scalar value -// that can be so converted. Returns false if not. -func (d *Data) AsFloat64() (float64, bool) { - // fast path for actual floats - if f, ok := d.Value.(float64); ok { - return f, true - } - if f, ok := d.Value.(float32); ok { - return float64(f), true - } - if tsr := d.AsTensor(); tsr != nil { - return 0, false - } - if dt := d.AsTable(); dt != nil { - return 0, false - } - v, err := reflectx.ToFloat(d.Value) - if err != nil { - return 0, false - } - return v, true -} - -// SetFloat64 sets data from given float64 if it is a scalar value -// that can be so set. Returns false if not. 
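
The scalar accessors follow one consistent pattern: conversion applies only to scalar Values (never to tensors or tables), and the boolean result reports whether it succeeded. A small sketch of that contract, using item names taken from the datafs-sim example further down:

package main

import (
	"fmt"

	"cogentcore.org/core/base/errors"
	"cogentcore.org/core/tensor/datafs"
)

func main() {
	dir := errors.Log1(datafs.NewDir("stats"))
	errors.Log1(datafs.New[float32](dir, "SSE"))
	errors.Log1(datafs.New[string](dir, "TrialName"))

	dir.Item("SSE").SetFloat64(0.25)           // ok: scalar float item
	dir.Item("TrialName").SetString("Trial_0") // ok: scalar string item

	if v, ok := dir.Item("SSE").AsFloat64(); ok {
		fmt.Println("SSE =", v) // SSE = 0.25
	}
	if s, ok := dir.Item("TrialName").AsString(); ok {
		fmt.Println("TrialName =", s)
	}
}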
-func (d *Data) SetFloat64(v float64) bool { - // fast path for actual floats - if _, ok := d.Value.(float64); ok { - d.Value = v - return true - } - if _, ok := d.Value.(float32); ok { - d.Value = float32(v) - return true - } - if tsr := d.AsTensor(); tsr != nil { - return false - } - if dt := d.AsTable(); dt != nil { - return false - } - err := reflectx.SetRobust(&d.Value, v) - if err != nil { - return false - } - return true -} - -// AsFloat32 returns data as a float32 if it is a scalar value -// that can be so converted. Returns false if not. -func (d *Data) AsFloat32() (float32, bool) { - v, ok := d.AsFloat64() - return float32(v), ok -} - -// SetFloat32 sets data from given float32 if it is a scalar value -// that can be so set. Returns false if not. -func (d *Data) SetFloat32(v float32) bool { - return d.SetFloat64(float64(v)) -} - -// AsString returns data as a string if it is a scalar value -// that can be so converted. Returns false if not. -func (d *Data) AsString() (string, bool) { - // fast path for actual strings - if s, ok := d.Value.(string); ok { - return s, true - } - if tsr := d.AsTensor(); tsr != nil { - return "", false - } - if dt := d.AsTable(); dt != nil { - return "", false - } - s := reflectx.ToString(d.Value) - return s, true -} - -// SetString sets data from given string if it is a scalar value -// that can be so set. Returns false if not. -func (d *Data) SetString(v string) bool { - // fast path for actual strings - if _, ok := d.Value.(string); ok { - d.Value = v - return true - } - if tsr := d.AsTensor(); tsr != nil { - return false - } - if dt := d.AsTable(); dt != nil { - return false - } - err := reflectx.SetRobust(&d.Value, v) - if err != nil { - return false - } - return true -} - -// AsInt returns data as a int if it is a scalar value -// that can be so converted. Returns false if not. -func (d *Data) AsInt() (int, bool) { - // fast path for actual ints - if f, ok := d.Value.(int); ok { - return f, true - } - if tsr := d.AsTensor(); tsr != nil { - return 0, false - } - if dt := d.AsTable(); dt != nil { - return 0, false - } - v, err := reflectx.ToInt(d.Value) - if err != nil { - return 0, false - } - return int(v), true -} - -// SetInt sets data from given int if it is a scalar value -// that can be so set. Returns false if not. -func (d *Data) SetInt(v int) bool { - // fast path for actual ints - if _, ok := d.Value.(int); ok { - d.Value = v - return true - } - if tsr := d.AsTensor(); tsr != nil { - return false - } - if dt := d.AsTable(); dt != nil { - return false - } - err := reflectx.SetRobust(&d.Value, v) - if err != nil { - return false - } - return true -} - -// Bytes returns the byte-wise representation of the data Value. -// This is the actual underlying data, so make a copy if it can be -// unintentionally modified or retained more than for immediate use. -func (d *Data) Bytes() []byte { - if tsr := d.AsTensor(); tsr != nil { - return tsr.Bytes() - } - size := d.Size() - switch x := d.Value.(type) { - // todo: other things here? - default: - return unsafe.Slice((*byte)(unsafe.Pointer(&x)), size) - } -} diff --git a/tensor/datafs/dir.go b/tensor/datafs/dir.go deleted file mode 100644 index 9824f66509..0000000000 --- a/tensor/datafs/dir.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package datafs - -import ( - "errors" - "fmt" - "io/fs" - "path" - "slices" - "sort" - - "golang.org/x/exp/maps" -) - -// NewDir returns a new datafs directory with given name. -// if parent != nil and a directory, this dir is added to it. -// if name is empty, then it is set to "root", the root directory. -// Note that "/" is not allowed for the root directory in Go [fs]. -// Names must be unique within a directory. -func NewDir(name string, parent ...*Data) (*Data, error) { - if name == "" { - name = "root" - } - var par *Data - if len(parent) == 1 { - par = parent[0] - } - d, err := NewData(par, name) - d.Value = make(map[string]*Data) - return d, err -} - -// Item returns data item in given directory by name. -// This is for fast access and direct usage of known -// items, and it will crash if item is not found or -// this data is not a directory. -func (d *Data) Item(name string) *Data { - fm := d.filemap() - return fm[name] -} - -// Items returns data items in given directory by name. -// error reports any items not found, or if not a directory. -func (d *Data) Items(names ...string) ([]*Data, error) { - if err := d.mustDir("Items", ""); err != nil { - return nil, err - } - fm := d.filemap() - var errs []error - var its []*Data - for _, nm := range names { - dt := fm[nm] - if dt != nil { - its = append(its, dt) - } else { - err := fmt.Errorf("datafs Dir %q item not found: %q", d.Path(), nm) - errs = append(errs, err) - } - } - return its, errors.Join(errs...) -} - -// ItemsFunc returns data items in given directory -// filtered by given function, in alpha order. -// If func is nil, all items are returned. -// Any directories within this directory are returned, -// unless specifically filtered. -func (d *Data) ItemsFunc(fun func(item *Data) bool) []*Data { - if err := d.mustDir("ItemsFunc", ""); err != nil { - return nil - } - fm := d.filemap() - names := d.DirNamesAlpha() - var its []*Data - for _, nm := range names { - dt := fm[nm] - if fun != nil && !fun(dt) { - continue - } - its = append(its, dt) - } - return its -} - -// ItemsByTimeFunc returns data items in given directory -// filtered by given function, in time order (i.e., order added). -// If func is nil, all items are returned. -// Any directories within this directory are returned, -// unless specifically filtered. -func (d *Data) ItemsByTimeFunc(fun func(item *Data) bool) []*Data { - if err := d.mustDir("ItemsByTimeFunc", ""); err != nil { - return nil - } - fm := d.filemap() - names := d.DirNamesByTime() - var its []*Data - for _, nm := range names { - dt := fm[nm] - if fun != nil && !fun(dt) { - continue - } - its = append(its, dt) - } - return its -} - -// FlatItemsFunc returns all "leaf" (non directory) data items -// in given directory, recursively descending into directories -// to return a flat list of the entire subtree, -// filtered by given function, in alpha order. The function can -// filter out directories to prune the tree. -// If func is nil, all items are returned. -func (d *Data) FlatItemsFunc(fun func(item *Data) bool) []*Data { - if err := d.mustDir("FlatItemsFunc", ""); err != nil { - return nil - } - fm := d.filemap() - names := d.DirNamesAlpha() - var its []*Data - for _, nm := range names { - dt := fm[nm] - if fun != nil && !fun(dt) { - continue - } - if dt.IsDir() { - subs := dt.FlatItemsFunc(fun) - its = append(its, subs...) 
- } else { - its = append(its, dt) - } - } - return its -} - -// FlatItemsByTimeFunc returns all "leaf" (non directory) data items -// in given directory, recursively descending into directories -// to return a flat list of the entire subtree, -// filtered by given function, in time order (i.e., order added). -// The function can filter out directories to prune the tree. -// If func is nil, all items are returned. -func (d *Data) FlatItemsByTimeFunc(fun func(item *Data) bool) []*Data { - if err := d.mustDir("FlatItemsByTimeFunc", ""); err != nil { - return nil - } - fm := d.filemap() - names := d.DirNamesByTime() - var its []*Data - for _, nm := range names { - dt := fm[nm] - if fun != nil && !fun(dt) { - continue - } - if dt.IsDir() { - subs := dt.FlatItemsByTimeFunc(fun) - its = append(its, subs...) - } else { - its = append(its, dt) - } - } - return its -} - -// DirAtPath returns directory at given relative path -// from this starting dir. -func (d *Data) DirAtPath(dir string) (*Data, error) { - var err error - dir = path.Clean(dir) - sdf, err := d.Sub(dir) // this ensures that d is a dir - if err != nil { - return nil, err - } - return sdf.(*Data), nil -} - -// Path returns the full path to this data item -func (d *Data) Path() string { - pt := d.name - cur := d.Parent - loops := make(map[*Data]struct{}) - for { - if cur == nil { - return pt - } - if _, ok := loops[cur]; ok { - return pt - } - pt = path.Join(cur.name, pt) - loops[cur] = struct{}{} - cur = cur.Parent - } -} - -// filemap returns the Value as map[string]*Data, or nil if not a dir -func (d *Data) filemap() map[string]*Data { - fm, ok := d.Value.(map[string]*Data) - if !ok { - return nil - } - return fm -} - -// DirNamesAlpha returns the names of items in the directory -// sorted alphabetically. Data must be dir by this point. -func (d *Data) DirNamesAlpha() []string { - fm := d.filemap() - names := maps.Keys(fm) - sort.Strings(names) - return names -} - -// DirNamesByTime returns the names of items in the directory -// sorted by modTime (order added). Data must be dir by this point. -func (d *Data) DirNamesByTime() []string { - fm := d.filemap() - names := maps.Keys(fm) - slices.SortFunc(names, func(a, b string) int { - return fm[a].ModTime().Compare(fm[b].ModTime()) - }) - return names -} - -// mustDir returns an error for given operation and path -// if this data item is not a directory. -func (d *Data) mustDir(op, path string) error { - if !d.IsDir() { - return &fs.PathError{Op: op, Path: path, Err: errors.New("datafs item is not a directory")} - } - return nil -} - -// Add adds an item to this directory data item. -// The only errors are if this item is not a directory, -// or the name already exists. -// Names must be unique within a directory. -func (d *Data) Add(it *Data) error { - if err := d.mustDir("Add", it.name); err != nil { - return err - } - fm := d.filemap() - _, ok := fm[it.name] - if ok { - return &fs.PathError{Op: "add", Path: it.name, Err: errors.New("data item already exists; names must be unique")} - } - fm[it.name] = it - return nil -} - -// Mkdir creates a new directory with the specified name. -// The only error is if this item is not a directory. 
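
A short sketch of navigating such a tree with the directory methods above; the layout mirrors the fs_test.go fixture further down in this diff, and the item names are illustrative:

package main

import (
	"fmt"

	"cogentcore.org/core/base/errors"
	"cogentcore.org/core/tensor/datafs"
)

func main() {
	root := errors.Log1(datafs.NewDir("root"))
	net := errors.Log1(root.Mkdir("network"))
	errors.Log1(datafs.NewTensor[float32](net, "units", []int{50, 50}))
	errors.Log1(datafs.New[int](root, "Run", "Epoch"))

	// Direct lookup by relative path and by name.
	nd := errors.Log1(root.DirAtPath("network"))
	fmt.Println(nd.Item("units").Path()) // root/network/units

	// All leaf (non-directory) items, in order added, anywhere in the subtree.
	for _, it := range root.FlatItemsByTimeFunc(nil) {
		fmt.Println(it.Path())
	}
}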
-func (d *Data) Mkdir(name string) (*Data, error) { - if err := d.mustDir("Mkdir", name); err != nil { - return nil, err - } - return NewDir(name, d) -} diff --git a/tensor/datafs/file.go b/tensor/datafs/file.go deleted file mode 100644 index 74dcfc4a0e..0000000000 --- a/tensor/datafs/file.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package datafs - -import ( - "bytes" - "io" - "io/fs" -) - -// File represents a data item for reading, as an [fs.File]. -// All io functionality is handled by [bytes.Reader]. -type File struct { - bytes.Reader - Data *Data - dirEntries []fs.DirEntry - dirsRead int -} - -func (f *File) Stat() (fs.FileInfo, error) { - return f.Data, nil -} - -func (f *File) Close() error { - f.Reader.Reset(f.Data.Bytes()) - return nil -} - -// DirFile represents a directory data item for reading, as fs.ReadDirFile. -type DirFile struct { - File - dirEntries []fs.DirEntry - dirsRead int -} - -func (f *DirFile) ReadDir(n int) ([]fs.DirEntry, error) { - if err := f.Data.mustDir("DirFile:ReadDir", ""); err != nil { - return nil, err - } - if f.dirEntries == nil { - f.dirEntries, _ = f.Data.ReadDir(".") - f.dirsRead = 0 - } - ne := len(f.dirEntries) - if n <= 0 { - if f.dirsRead >= ne { - return nil, nil - } - re := f.dirEntries[f.dirsRead:] - f.dirsRead = ne - return re, nil - } - if f.dirsRead >= ne { - return nil, io.EOF - } - mx := min(n+f.dirsRead, ne) - re := f.dirEntries[f.dirsRead:mx] - f.dirsRead = mx - return re, nil -} - -func (f *DirFile) Close() error { - f.dirsRead = 0 - return nil -} diff --git a/tensor/datafs/fs.go b/tensor/datafs/fs.go deleted file mode 100644 index 034994cb99..0000000000 --- a/tensor/datafs/fs.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package datafs - -import ( - "bytes" - "errors" - "io/fs" - "path" - "slices" - "sort" - "time" - "unsafe" - - "cogentcore.org/core/base/fsx" - "golang.org/x/exp/maps" -) - -// fs.go contains all the io/fs interface implementations - -// Open opens the given data Value within this datafs filesystem. -func (d *Data) Open(name string) (fs.File, error) { - if !fs.ValidPath(name) { - return nil, &fs.PathError{Op: "open", Path: name, Err: errors.New("invalid name")} - } - dir, file := path.Split(name) - sd, err := d.DirAtPath(dir) - if err != nil { - return nil, err - } - fm := sd.filemap() - itm, ok := fm[file] - if !ok { - if dir == "" && (file == d.name || file == ".") { - return &DirFile{File: File{Reader: *bytes.NewReader(d.Bytes()), Data: d}}, nil - } - return nil, &fs.PathError{Op: "open", Path: name, Err: errors.New("file not found")} - } - if itm.IsDir() { - return &DirFile{File: File{Reader: *bytes.NewReader(itm.Bytes()), Data: itm}}, nil - } - return &File{Reader: *bytes.NewReader(itm.Bytes()), Data: itm}, nil -} - -// Stat returns a FileInfo describing the file. -// If there is an error, it should be of type *PathError. 
-func (d *Data) Stat(name string) (fs.FileInfo, error) { - if !fs.ValidPath(name) { - return nil, &fs.PathError{Op: "open", Path: name, Err: errors.New("invalid name")} - } - dir, file := path.Split(name) - sd, err := d.DirAtPath(dir) - if err != nil { - return nil, err - } - fm := sd.filemap() - itm, ok := fm[file] - if !ok { - if dir == "" && (file == d.name || file == ".") { - return d, nil - } - return nil, &fs.PathError{Op: "stat", Path: name, Err: errors.New("file not found")} - } - return itm, nil -} - -// Sub returns a data FS corresponding to the subtree rooted at dir. -func (d *Data) Sub(dir string) (fs.FS, error) { - if err := d.mustDir("sub", dir); err != nil { - return nil, err - } - if !fs.ValidPath(dir) { - return nil, &fs.PathError{Op: "sub", Path: dir, Err: errors.New("invalid name")} - } - if dir == "." || dir == "" || dir == d.name { - return d, nil - } - cd := dir - cur := d - root, rest := fsx.SplitRootPathFS(dir) - if root == "." || root == d.name { - cd = rest - } - for { - if cd == "." || cd == "" { - return cur, nil - } - root, rest := fsx.SplitRootPathFS(cd) - if root == "." && rest == "" { - return cur, nil - } - cd = rest - fm := cur.filemap() - sd, ok := fm[root] - if !ok { - return nil, &fs.PathError{Op: "sub", Path: dir, Err: errors.New("directory not found")} - } - if !sd.IsDir() { - return nil, &fs.PathError{Op: "sub", Path: dir, Err: errors.New("is not a directory")} - } - cur = sd - } -} - -// ReadDir returns the contents of the given directory within this filesystem. -// Use "." (or "") to refer to the current directory. -func (d *Data) ReadDir(dir string) ([]fs.DirEntry, error) { - sd, err := d.DirAtPath(dir) - if err != nil { - return nil, err - } - fm := sd.filemap() - names := maps.Keys(fm) - sort.Strings(names) - ents := make([]fs.DirEntry, len(names)) - for i, nm := range names { - ents[i] = fm[nm] - } - return ents, nil -} - -// ReadFile reads the named file and returns its contents. -// A successful call returns a nil error, not io.EOF. -// (Because ReadFile reads the whole file, the expected EOF -// from the final Read is not treated as an error to be reported.) -// -// The caller is permitted to modify the returned byte slice. -// This method should return a copy of the underlying data. -func (d *Data) ReadFile(name string) ([]byte, error) { - if err := d.mustDir("readFile", name); err != nil { - return nil, err - } - if !fs.ValidPath(name) { - return nil, &fs.PathError{Op: "readFile", Path: name, Err: errors.New("invalid name")} - } - dir, file := path.Split(name) - sd, err := d.DirAtPath(dir) - if err != nil { - return nil, err - } - fm := sd.filemap() - itm, ok := fm[file] - if !ok { - return nil, &fs.PathError{Op: "readFile", Path: name, Err: errors.New("file not found")} - } - if itm.IsDir() { - return nil, &fs.PathError{Op: "readFile", Path: name, Err: errors.New("Value is a directory")} - } - return slices.Clone(itm.Bytes()), nil -} - -/////////////////////////////// -// FileInfo interface: - -// Sizer is an interface to allow an arbitrary data Value -// to report its size in bytes. Size is automatically computed for -// known basic data Values supported by datafs directly. -type Sizer interface { - Sizeof() int64 -} - -func (d *Data) Name() string { return d.name } - -// Size returns the size of known data Values, or it uses -// the Sizer interface, otherwise returns 0. 
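
Because Data implements fs.FS (plus ReadDir, ReadFile, Sub, and Stat above), the standard library io/fs tooling works on it directly; a small sketch over the same fixture used by fs_test.go below:

package main

import (
	"fmt"
	"io/fs"

	"cogentcore.org/core/base/errors"
	"cogentcore.org/core/tensor/datafs"
)

func main() {
	root := errors.Log1(datafs.NewDir("root"))
	net := errors.Log1(root.Mkdir("network"))
	errors.Log1(datafs.NewTensor[float32](net, "units", []int{50, 50}))

	// Standard io/fs traversal over the virtual filesystem.
	fs.WalkDir(root, ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, d.IsDir())
		return nil
	})

	// Raw bytes of a leaf item (a copy, per the ReadFile contract above).
	b := errors.Log1(fs.ReadFile(root, "network/units"))
	fmt.Println(len(b), "bytes") // 50*50 float32s => 10000 bytes
}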
-func (d *Data) Size() int64 { - if szr, ok := d.Value.(Sizer); ok { // tensor implements Sizer - return szr.Sizeof() - } - switch x := d.Value.(type) { - case float32, int32, uint32: - return 4 - case float64, int64: - return 8 - case int: - return int64(unsafe.Sizeof(x)) - case complex64: - return 16 - case complex128: - return 32 - } - return 0 -} - -func (d *Data) IsDir() bool { - _, ok := d.Value.(map[string]*Data) - return ok -} - -func (d *Data) ModTime() time.Time { - return d.modTime -} - -func (d *Data) Mode() fs.FileMode { - if d.IsDir() { - return 0755 | fs.ModeDir - } - return 0444 -} - -// Sys returns the metadata for Value -func (d *Data) Sys() any { return d.Meta } - -/////////////////////////////// -// DirEntry interface - -func (d *Data) Type() fs.FileMode { - return d.Mode().Type() -} - -func (d *Data) Info() (fs.FileInfo, error) { - return d, nil -} diff --git a/tensor/datafs/fs_test.go b/tensor/datafs/fs_test.go deleted file mode 100644 index e44f9a2d27..0000000000 --- a/tensor/datafs/fs_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package datafs - -import ( - "fmt" - "testing" - "testing/fstest" - - "github.com/stretchr/testify/assert" -) - -func makeTestData(t *testing.T) *Data { - dfs, err := NewDir("root") - assert.NoError(t, err) - net, err := dfs.Mkdir("network") - assert.NoError(t, err) - NewTensor[float32](net, "units", []int{50, 50}) - log, err := dfs.Mkdir("log") - assert.NoError(t, err) - _, err = NewTable(log, "Trial") - assert.NoError(t, err) - return dfs -} - -func TestFS(t *testing.T) { - dfs := makeTestData(t) - dirs, err := dfs.ReadDir(".") - assert.NoError(t, err) - for _, d := range dirs { - fmt.Println(d.Name()) - } - sd, err := dfs.DirAtPath("root") - assert.NoError(t, err) - sd, err = sd.DirAtPath("network") - assert.NoError(t, err) - sd, err = dfs.DirAtPath("root/network") - assert.NoError(t, err) - - if err := fstest.TestFS(dfs, "network/units"); err != nil { - t.Fatal(err) - } -} diff --git a/tensor/datafs/metadata.go b/tensor/datafs/metadata.go deleted file mode 100644 index 3ff8d86c8d..0000000000 --- a/tensor/datafs/metadata.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package datafs - -import ( - "cogentcore.org/core/base/errors" - "cogentcore.org/core/base/fsx" - "cogentcore.org/core/base/metadata" - "cogentcore.org/core/plot/plotcore" - "cogentcore.org/core/tensor/table" -) - -// This file provides standardized metadata options for frequent -// use cases, using codified key names to eliminate typos. - -// SetMetaItems sets given metadata for items in given directory -// with given names. Returns error for any items not found. -func (d *Data) SetMetaItems(key string, value any, names ...string) error { - its, err := d.Items(names...) - for _, it := range its { - it.Meta.Set(key, value) - } - return err -} - -// PlotColumnZeroOne returns plot options with a fixed 0-1 range -func PlotColumnZeroOne() *plotcore.ColumnOptions { - opts := &plotcore.ColumnOptions{} - opts.Range.SetMin(0) - opts.Range.SetMax(1) - return opts -} - -// SetPlotColumnOptions sets given plotting options for named items -// within this directory (stored in Metadata). 
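
These metadata helpers, together with the calc-function helpers that follow, are how the datafs-sim example later in this diff wires plot ranges and per-item compute functions; a compact sketch of the same pattern, with illustrative item names and a hypothetical per-trial value:

package main

import (
	"cogentcore.org/core/base/errors"
	"cogentcore.org/core/tensor/datafs"
)

func main() {
	stats := errors.Log1(datafs.NewDir("Stats"))
	errors.Log1(datafs.New[float32](stats, "SSE", "TrlErr"))

	// Fix the plot range of selected items to 0..1 (stored in their Metadata).
	z1 := datafs.PlotColumnZeroOne()
	stats.SetPlotColumnOptions(z1, "SSE", "TrlErr")

	// Attach a compute function to an item; CalcAll runs every such function
	// in this directory and its subdirectories.
	sse := 0.42 // hypothetical per-trial error, updated elsewhere in a real sim
	stats.Item("SSE").SetCalcFunc(func() error {
		stats.Item("SSE").SetFloat64(sse)
		return nil
	})
	stats.CalcAll()
}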
-func (d *Data) SetPlotColumnOptions(opts *plotcore.ColumnOptions, names ...string) error { - return d.SetMetaItems("PlotColumnOptions", opts, names...) -} - -// PlotColumnOptions returns plotting options if they have been set, else nil. -func (d *Data) PlotColumnOptions() *plotcore.ColumnOptions { - return errors.Ignore1(metadata.Get[*plotcore.ColumnOptions](d.Meta, "PlotColumnOptions")) -} - -// SetCalcFunc sets a function to compute an updated Value for this data item. -// Function is stored as CalcFunc in Metadata. Can be called by [Data.Calc] method. -func (d *Data) SetCalcFunc(fun func() error) { - d.Meta.Set("CalcFunc", fun) -} - -// Calc calls function set by [Data.SetCalcFunc] to compute an updated Value -// for this data item. Returns an error if func not set, or any error from func itself. -// Function is stored as CalcFunc in Metadata. -func (d *Data) Calc() error { - fun, err := metadata.Get[func() error](d.Meta, "CalcFunc") - if err != nil { - return err - } - return fun() -} - -// CalcAll calls function set by [Data.SetCalcFunc] for all items -// in this directory and all of its subdirectories. -// Calls Calc on items from FlatItemsByTimeFunc(nil) -func (d *Data) CalcAll() error { - var errs []error - items := d.FlatItemsByTimeFunc(nil) - for _, it := range items { - err := it.Calc() - if err != nil { - errs = append(errs, err) - } - } - return errors.Join(errs...) -} - -// DirTable returns a table.Table for this directory item, with columns -// as the Tensor elements in the directory and any subdirectories, -// from FlatItemsByTimeFunc using given filter function. -// This is a convenient mechanism for creating a plot of all the data -// in a given directory. -// If such was previously constructed, it is returned from "DirTable" -// Metadata key where the table is stored. -// Row count is updated to current max row. -// Delete that key to reconstruct if items have changed. -func (d *Data) DirTable(fun func(item *Data) bool) *table.Table { - dt, err := metadata.Get[*table.Table](d.Meta, "DirTable") - if err == nil { - var maxRow int - for _, tsr := range dt.Columns { - maxRow = max(maxRow, tsr.DimSize(0)) - } - dt.Rows = maxRow - return dt - } - items := d.FlatItemsByTimeFunc(fun) - dt = table.NewTable(fsx.DirAndFile(string(d.Path()))) - for _, it := range items { - tsr := it.AsTensor() - if tsr == nil { - continue - } - if dt.Rows == 0 { - dt.Rows = tsr.DimSize(0) - } - nm := it.Name() - if it.Parent != d { - nm = fsx.DirAndFile(string(it.Path())) - } - dt.AddColumn(tsr, nm) - } - d.Meta.Set("DirTable", dt) - return dt -} diff --git a/tensor/examples/datafs-sim/sim.go b/tensor/examples/datafs-sim/sim.go deleted file mode 100644 index 2062c560b5..0000000000 --- a/tensor/examples/datafs-sim/sim.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import ( - "math/rand/v2" - "reflect" - "strconv" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/core" - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/databrowser" - "cogentcore.org/core/tensor/datafs" - "cogentcore.org/core/tensor/stats/stats" -) - -type Sim struct { - Root *datafs.Data - Config *datafs.Data - Stats *datafs.Data - Logs *datafs.Data -} - -// ConfigAll configures the sim -func (ss *Sim) ConfigAll() { - ss.Root = errors.Log1(datafs.NewDir("Root")) - ss.Config = errors.Log1(ss.Root.Mkdir("Config")) - errors.Log1(datafs.New[int](ss.Config, "NRun", "NEpoch", "NTrial")) - ss.Config.Item("NRun").SetInt(5) - ss.Config.Item("NEpoch").SetInt(20) - ss.Config.Item("NTrial").SetInt(25) - - ss.Stats = ss.ConfigStats(ss.Root) - ss.Logs = ss.ConfigLogs(ss.Root) -} - -// ConfigStats adds basic stats that we record for our simulation. -func (ss *Sim) ConfigStats(dir *datafs.Data) *datafs.Data { - stats := errors.Log1(dir.Mkdir("Stats")) - errors.Log1(datafs.New[int](stats, "Run", "Epoch", "Trial")) // counters - errors.Log1(datafs.New[string](stats, "TrialName")) - errors.Log1(datafs.New[float32](stats, "SSE", "AvgSSE", "TrlErr")) - z1 := datafs.PlotColumnZeroOne() - stats.SetPlotColumnOptions(z1, "AvgErr", "TrlErr") - zmax := datafs.PlotColumnZeroOne() - zmax.Range.FixMax = false - stats.SetPlotColumnOptions(z1, "SSE") - return stats -} - -// ConfigLogs adds first-level logging of stats into tensors -func (ss *Sim) ConfigLogs(dir *datafs.Data) *datafs.Data { - logd := errors.Log1(dir.Mkdir("Log")) - trial := ss.ConfigTrialLog(logd) - ss.ConfigAggLog(logd, "Epoch", trial, stats.Mean, stats.Sem, stats.Min) - return logd -} - -// ConfigTrialLog adds first-level logging of stats into tensors -func (ss *Sim) ConfigTrialLog(dir *datafs.Data) *datafs.Data { - logd := errors.Log1(dir.Mkdir("Trial")) - ntrial, _ := ss.Config.Item("NTrial").AsInt() - sitems := ss.Stats.ItemsByTimeFunc(nil) - for _, st := range sitems { - dt := errors.Log1(datafs.NewData(logd, st.Name())) - tsr := tensor.NewOfType(st.DataType(), []int{ntrial}, "row") - dt.Value = tsr - dt.Meta.Copy(st.Meta) // key affordance: we get meta data from source - dt.SetCalcFunc(func() error { - trl, _ := ss.Stats.Item("Trial").AsInt() - if st.IsNumeric() { - v, _ := st.AsFloat64() - tsr.SetFloat1D(trl, v) - } else { - v, _ := st.AsString() - tsr.SetString1D(trl, v) - } - return nil - }) - } - return logd -} - -// ConfigAggLog adds a higher-level logging of lower-level into higher-level tensors -func (ss *Sim) ConfigAggLog(dir *datafs.Data, level string, from *datafs.Data, aggs ...stats.Stats) *datafs.Data { - logd := errors.Log1(dir.Mkdir(level)) - sitems := ss.Stats.ItemsByTimeFunc(nil) - nctr, _ := ss.Config.Item("N" + level).AsInt() - for _, st := range sitems { - if !st.IsNumeric() { - continue - } - src := from.Item(st.Name()).AsTensor() - if st.DataType() >= reflect.Float32 { - dd := errors.Log1(logd.Mkdir(st.Name())) - for _, ag := range aggs { // key advantage of dir structure: multiple stats per item - dt := errors.Log1(datafs.NewData(dd, ag.String())) - tsr := tensor.NewOfType(st.DataType(), []int{nctr}, "row") - dt.Value = tsr - dt.Meta.Copy(st.Meta) - dt.SetCalcFunc(func() error { - ctr, _ := ss.Stats.Item(level).AsInt() - v := stats.StatTensor(src, ag) - tsr.SetFloat1D(ctr, v) - return nil - }) - } - } else { - dt := errors.Log1(datafs.NewData(logd, st.Name())) - tsr := tensor.NewOfType(st.DataType(), []int{nctr}, "row") - // todo: set level counter as default x axis in plot config 
- dt.Value = tsr - dt.Meta.Copy(st.Meta) - dt.SetCalcFunc(func() error { - ctr, _ := ss.Stats.Item(level).AsInt() - v, _ := st.AsFloat64() - tsr.SetFloat1D(ctr, v) - return nil - }) - } - } - return logd -} - -func (ss *Sim) Run() { - nepc, _ := ss.Config.Item("NEpoch").AsInt() - ntrl, _ := ss.Config.Item("NTrial").AsInt() - for epc := range nepc { - ss.Stats.Item("Epoch").SetInt(epc) - for trl := range ntrl { - ss.Stats.Item("Trial").SetInt(trl) - ss.RunTrial(trl) - } - ss.EpochDone() - } -} - -func (ss *Sim) RunTrial(trl int) { - ss.Stats.Item("TrialName").SetString("Trial_" + strconv.Itoa(trl)) - sse := rand.Float32() - avgSSE := rand.Float32() - ss.Stats.Item("SSE").SetFloat32(sse) - ss.Stats.Item("AvgSSE").SetFloat32(avgSSE) - trlErr := float32(1) - if sse < 0.5 { - trlErr = 0 - } - ss.Stats.Item("TrlErr").SetFloat32(trlErr) - ss.Logs.Item("Trial").CalcAll() -} - -func (ss *Sim) EpochDone() { - ss.Logs.Item("Epoch").CalcAll() -} - -func main() { - ss := &Sim{} - ss.ConfigAll() - ss.Run() - - databrowser.NewBrowserWindow(ss.Root, "Root") - core.Wait() -} diff --git a/tensor/examples/dataproc/README.md b/tensor/examples/dataproc/README.md deleted file mode 100644 index a12d4d1266..0000000000 --- a/tensor/examples/dataproc/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Build and run this `main` package to see a full demo of how to use this system for data analysis, paralleling the example in [Python Data Science](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html) using pandas, to see directly how that translates into this framework. - -Most of the code is in the `AnalyzePlanets` function, which opens a .csv file, and then uses a number of `IndexView` views of the data to perform various analyses as shown in the GUI tables. Click on the tabs at the very top of the window to see the various analyzed versions of the data shown in the first tab. - -You can also click on headers of the columns to sort by those columns (toggles between ascending and descending), - diff --git a/tensor/examples/dataproc/dataproc.go b/tensor/examples/dataproc/dataproc.go deleted file mode 100644 index d5455b695d..0000000000 --- a/tensor/examples/dataproc/dataproc.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import ( - "embed" - "fmt" - "math" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/core" - "cogentcore.org/core/events" - "cogentcore.org/core/icons" - "cogentcore.org/core/tensor/stats/split" - "cogentcore.org/core/tensor/stats/stats" - "cogentcore.org/core/tensor/table" - "cogentcore.org/core/tensor/tensorcore" - "cogentcore.org/core/tree" -) - -// Planets is raw data -var Planets *table.Table - -// PlanetsDesc are descriptive stats of all (non-Null) data -var PlanetsDesc *table.Table - -// PlanetsNNDesc are descriptive stats of planets where entire row is non-null -var PlanetsNNDesc *table.Table - -// GpMethodOrbit shows the median of orbital period as a function of method -var GpMethodOrbit *table.Table - -// GpMethodYear shows all stats of year described by orbit -var GpMethodYear *table.Table - -// GpMethodDecade shows number of planets found in each decade by given method -var GpMethodDecade *table.Table - -// GpDecade shows number of planets found in each decade -var GpDecade *table.Table - -//go:embed *.csv -var csv embed.FS - -// AnalyzePlanets analyzes planets.csv data following some of the examples -// given here, using pandas: -// -// https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html -func AnalyzePlanets() { - Planets = table.NewTable("planets") - Planets.OpenFS(csv, "planets.csv", table.Comma) - - PlanetsAll := table.NewIndexView(Planets) // full original data - - PlanetsDesc = stats.DescAll(PlanetsAll) // individually excludes Null values in each col, but not row-wise - PlanetsNNDesc = stats.DescAll(PlanetsAll) // standard descriptive stats for row-wise non-nulls - - byMethod := split.GroupBy(PlanetsAll, "method") - split.AggColumn(byMethod, "orbital_period", stats.Median) - GpMethodOrbit = byMethod.AggsToTable(table.AddAggName) - - byMethod.DeleteAggs() - split.DescColumn(byMethod, "year") // full desc stats of year - - byMethod.Filter(func(idx int) bool { - ag := errors.Log1(byMethod.AggByColumnName("year:Std")) - return ag.Aggs[idx][0] > 0 // exclude results with 0 std - }) - - GpMethodYear = byMethod.AggsToTable(table.AddAggName) - - byMethodDecade := split.GroupByFunc(PlanetsAll, func(row int) []string { - meth := Planets.StringValue("method", row) - yr := Planets.Float("year", row) - decade := math.Floor(yr/10) * 10 - return []string{meth, fmt.Sprintf("%gs", decade)} - }) - byMethodDecade.SetLevels("method", "decade") - - split.AggColumn(byMethodDecade, "number", stats.Sum) - - // uncomment this to switch to decade first, then method - // byMethodDecade.ReorderLevels([]int{1, 0}) - // byMethodDecade.SortLevels() - - decadeOnly := errors.Log1(byMethodDecade.ExtractLevels([]int{1})) - split.AggColumn(decadeOnly, "number", stats.Sum) - GpDecade = decadeOnly.AggsToTable(table.AddAggName) - - GpMethodDecade = byMethodDecade.AggsToTable(table.AddAggName) // here to ensure that decadeOnly didn't mess up.. - - // todo: need unstack -- should be specific to the splits data because we already have the cols and - // groups etc -- the ExtractLevels method provides key starting point. - - // todo: pivot table -- neeeds unstack function. - - // todo: could have a generic unstack-like method that takes a column for the data to turn into columns - // and another that has the data to put in the cells. 
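AnalyzePlanets follows the pandas split-apply-combine recipe: GroupBy builds row groups per unique value of a column, and AggColumn/DescColumn then reduce each group with a statistic. A minimal standalone sketch of that shape, using a few rows from the planets.csv data below and plain map/sort code rather than the split package:

package main

import (
	"fmt"
	"sort"
)

// row is a tiny stand-in for one record of the planets table.
type row struct {
	method string
	orbit  float64
}

// median returns the middle value of xs (mean of the two middle values for even n).
func median(xs []float64) float64 {
	sort.Float64s(xs)
	n := len(xs)
	if n%2 == 1 {
		return xs[n/2]
	}
	return (xs[n/2-1] + xs[n/2]) / 2
}

func main() {
	data := []row{
		{"Radial Velocity", 269.3}, {"Radial Velocity", 874.774},
		{"Transit", 1.5089557}, {"Transit", 4.2568},
		{"Imaging", 6000.0},
	}
	// Split: group rows by method. Apply/combine: aggregate each group,
	// the same two-step shape as GroupBy followed by AggColumn above.
	groups := map[string][]float64{}
	for _, r := range data {
		groups[r.method] = append(groups[r.method], r.orbit)
	}
	for method, orbits := range groups {
		fmt.Printf("%s: median orbital_period = %g\n", method, median(orbits))
	}
}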
-} - -func main() { - AnalyzePlanets() - - b := core.NewBody("dataproc") - tv := core.NewTabs(b) - - nt, _ := tv.NewTab("Planets Data") - tbv := tensorcore.NewTable(nt).SetTable(Planets) - b.AddTopBar(func(bar *core.Frame) { - tb := core.NewToolbar(bar) - tb.Maker(tbv.MakeToolbar) - tb.Maker(func(p *tree.Plan) { - tree.Add(p, func(w *core.Button) { - w.SetText("README").SetIcon(icons.FileMarkdown). - SetTooltip("open README help file").OnClick(func(e events.Event) { - core.TheApp.OpenURL("https://github.com/cogentcore/core/blob/main/tensor/examples/dataproc/README.md") - }) - }) - }) - }) - - nt, _ = tv.NewTab("Non-Null Rows Desc") - tensorcore.NewTable(nt).SetTable(PlanetsNNDesc) - nt, _ = tv.NewTab("All Desc") - tensorcore.NewTable(nt).SetTable(PlanetsDesc) - nt, _ = tv.NewTab("By Method Orbit") - tensorcore.NewTable(nt).SetTable(GpMethodOrbit) - nt, _ = tv.NewTab("By Method Year") - tensorcore.NewTable(nt).SetTable(GpMethodYear) - nt, _ = tv.NewTab("By Method Decade") - tensorcore.NewTable(nt).SetTable(GpMethodDecade) - nt, _ = tv.NewTab("By Decade") - tensorcore.NewTable(nt).SetTable(GpDecade) - - tv.SelectTabIndex(0) - - b.RunMainWindow() -} diff --git a/tensor/examples/dataproc/planets.csv b/tensor/examples/dataproc/planets.csv deleted file mode 100644 index 3373183547..0000000000 --- a/tensor/examples/dataproc/planets.csv +++ /dev/null @@ -1,1036 +0,0 @@ -method,number,orbital_period,mass,distance,year -Radial Velocity,1,269.3,7.1,77.4,2006 -Radial Velocity,1,874.774,2.21,56.95,2008 -Radial Velocity,1,763.0,2.6,19.84,2011 -Radial Velocity,1,326.03,19.4,110.62,2007 -Radial Velocity,1,516.22,10.5,119.47,2009 -Radial Velocity,1,185.84,4.8,76.39,2008 -Radial Velocity,1,1773.4,4.64,18.15,2002 -Radial Velocity,1,798.5,,21.41,1996 -Radial Velocity,1,993.3,10.3,73.1,2008 -Radial Velocity,2,452.8,1.99,74.79,2010 -Radial Velocity,2,883.0,0.86,74.79,2010 -Radial Velocity,1,335.1,9.88,39.43,2009 -Radial Velocity,1,479.1,3.88,97.28,2008 -Radial Velocity,3,1078.0,2.53,14.08,1996 -Radial Velocity,3,2391.0,0.54,14.08,2001 -Radial Velocity,3,14002.0,1.64,14.08,2009 -Radial Velocity,1,4.230785,0.472,15.36,1995 -Radial Velocity,5,14.651,0.8,12.53,1996 -Radial Velocity,5,44.38,0.165,12.53,2004 -Radial Velocity,5,4909.0,3.53,12.53,2002 -Radial Velocity,5,0.73654,,12.53,2011 -Radial Velocity,5,261.2,0.172,12.53,2007 -Radial Velocity,3,4.215,0.016,8.52,2009 -Radial Velocity,3,38.021,0.057,8.52,2009 -Radial Velocity,3,123.01,0.072,8.52,2009 -Radial Velocity,1,116.6884,,18.11,1996 -Radial Velocity,1,691.9,,81.5,2012 -Radial Velocity,1,952.7,5.3,97.18,2008 -Radial Velocity,1,181.4,3.2,45.52,2013 -Imaging,1,,,45.52,2005 -Imaging,1,,,165.0,2007 -Imaging,1,,,140.0,2004 -Eclipse Timing Variations,1,10220.0,6.05,,2009 -Imaging,1,,,,2008 -Imaging,1,,,145.0,2013 -Imaging,1,,,139.0,2004 -Imaging,1,,,18.39,2006 -Eclipse Timing Variations,2,5767.0,,130.72,2008 -Eclipse Timing Variations,2,3321.0,,130.72,2008 -Eclipse Timing Variations,2,5573.55,,500.0,2010 -Eclipse Timing Variations,2,2883.5,,500.0,2010 -Eclipse Timing Variations,1,2900.0,,,2011 -Eclipse Timing Variations,1,4343.5,4.2,,2012 -Eclipse Timing Variations,2,5840.0,,,2011 -Eclipse Timing Variations,2,1916.25,,,2011 -Radial Velocity,1,380.8,1.8,20.21,2010 -Radial Velocity,1,3.2357,0.0036,1.35,2012 -Imaging,1,6000.0,,19.28,2008 -Radial Velocity,1,2502.0,1.55,3.22,2000 -Radial Velocity,1,417.9,,70.42,2012 -Radial Velocity,1,594.9,7.6,47.53,2006 -Radial Velocity,1,428.5,8.78,38.52,2009 -Radial Velocity,1,903.3,1.85,13.79,2003 -Radial 
Velocity,1,1251.0,,31.12,2007 -Imaging,1,,,52.03,2012 -Radial Velocity,1,136.75,2.8,62.66,2007 -Radial Velocity,2,530.32,,46.84,2012 -Radial Velocity,2,3186.0,,46.84,2012 -Radial Velocity,1,277.02,1.7,80.64,2013 -Radial Velocity,1,187.83,,84.03,2012 -Radial Velocity,1,1630.0,,56.31,2012 -Radial Velocity,1,39.845,1.04,17.43,1997 -Radial Velocity,1,3.3135,3.9,15.6,1996 -Radial Velocity,1,305.5,20.6,92.51,2013 -Radial Velocity,4,4.617033,0.6876,13.47,1996 -Radial Velocity,4,241.258,1.981,13.47,1999 -Radial Velocity,4,1276.46,4.132,13.47,1999 -Radial Velocity,4,3848.86,1.059,13.47,2010 -Imaging,1,318280.0,,7.69,2008 -Imaging,1,,,145.0,2008 -Imaging,1,,,36.0,2013 -Imaging,1,,,140.0,2010 -Imaging,1,4639.15,,12.21,2009 -Imaging,1,,,52.4,2004 -Imaging,1,7336.5,,25.0,2009 -Imaging,1,8679.7,,26.67,2009 -Radial Velocity,1,655.6,5.1,37.54,2008 -Radial Velocity,1,714.3,10.6,,2007 -Radial Velocity,1,3.48777,,80.0,2000 -Radial Velocity,2,5.6,0.045,42.09,2009 -Radial Velocity,2,237.6,0.33,42.09,2009 -Radial Velocity,2,3.8728,0.027,20.1,2013 -Radial Velocity,2,125.94,0.17,20.1,2013 -Radial Velocity,1,268.94,1.47,50.03,2009 -Radial Velocity,1,137.48,1.11,175.44,2013 -Radial Velocity,2,379.63,21.42,,2009 -Radial Velocity,2,621.99,12.47,,2009 -Radial Velocity,1,578.2,,,2012 -Radial Velocity,1,392.6,0.91,,2011 -Imaging,1,10037.5,,23.1,2011 -Imaging,1,,,,2006 -Transit,1,1.5089557,,,2008 -Transit,1,1.7429935,,200.0,2008 -Transit,1,4.2568,,680.0,2008 -Transit,1,9.20205,,,2008 -Transit,1,4.0378962,,,2009 -Transit,1,8.886593,,,2009 -Transit,2,0.853585,,150.0,2009 -Radial Velocity,2,3.698,,150.0,2009 -Transit,1,6.21229,,380.0,2010 -Transit,1,95.2738,,460.0,2009 -Transit,1,13.2406,,345.0,2010 -Transit,1,2.99433,,560.0,2010 -Transit,1,2.828042,,1150.0,2010 -Transit,1,4.03519,,1060.0,2010 -Transit,1,1.51214,,1340.0,2010 -Transit,1,5.35227,,840.0,2011 -Transit,1,3.7681,,920.0,2011 -Transit,1,1.9000693,,870.0,2011 -Transit,1,3.89713,,770.0,2011 -Transit,1,9.24285,,1230.0,2011 -Transit,1,3.6313,,600.0,2011 -Transit,1,3.57532,,,2014 -Astrometry,1,246.36,,20.77,2013 -Radial Velocity,1,15.76491,3.91,10.91,1999 -Radial Velocity,3,8.631,0.035,14.97,2013 -Radial Velocity,3,25.6,0.024,14.97,2013 -Radial Velocity,3,603.0,0.079,14.97,2013 -Radial Velocity,1,2288.0,0.82,12.12,2009 -Radial Velocity,2,692.0,1.894,15.1,2007 -Radial Velocity,2,7100.0,1.6,15.1,2011 -Radial Velocity,1,4100.0,2.3,20.03,2013 -Radial Velocity,1,7.3709,0.018,9.04,2011 -Radial Velocity,1,2.64385,,10.23,2004 -Imaging,1,,,17.95,2013 -Radial Velocity,4,5.36874,0.049,6.27,2005 -Radial Velocity,4,12.9292,0.017,6.27,2005 -Radial Velocity,4,66.8,0.022,6.27,2005 -Radial Velocity,4,3.14942,0.006,6.27,2005 -Radial Velocity,1,598.3,0.328,10.32,2009 -Radial Velocity,6,7.2004,0.018,6.8,2011 -Radial Velocity,6,28.14,0.012,6.8,2011 -Radial Velocity,6,91.61,0.016,6.8,2013 -Radial Velocity,6,62.24,0.008,6.8,2013 -Radial Velocity,6,39.026,0.008,6.8,2013 -Radial Velocity,6,256.2,0.014,6.8,2013 -Radial Velocity,1,4.6938,0.035,4.54,2007 -Radial Velocity,3,1050.3,4.95,16.13,2010 -Radial Velocity,3,3.6,0.014,16.13,2012 -Radial Velocity,3,35.37,0.036,16.13,2012 -Radial Velocity,1,3416.0,0.64,4.94,2008 -Radial Velocity,1,1845.0,0.91,8.77,2006 -Radial Velocity,4,61.1166,2.2756,4.7,1998 -Radial Velocity,4,30.0881,0.7142,4.7,2000 -Radial Velocity,4,1.93778,0.021,4.7,2005 -Radial Velocity,4,124.26,0.046,4.7,2010 -Transit,1,1.58040482,,,2009 -Radial Velocity,1,133.71,3.37,17.62,2000 -Radial Velocity,1,3.33714,,25.2,2012 -Radial Velocity,1,2.64561,0.022,19.8,2010 -Imaging,1,,,145.0,2010 
-Transit,1,4.4652934,,139.0,2006 -Transit,1,5.6334729,,135.32,2007 -Transit,1,2.899736,,138.0,2007 -Transit,1,3.056536,,314.0,2007 -Transit,1,2.788491,,342.0,2007 -Transit,1,3.852985,,261.0,2007 -Transit,1,3.0763776,,230.0,2008 -Transit,1,3.92289,,480.0,2008 -Transit,1,3.2130598,,142.5,2009 -Transit,2,2.91626,,214.0,2009 -Radial Velocity,2,428.5,15.2,214.0,2009 -Transit,1,4.627669,,205.0,2010 -Transit,1,10.863502,,190.0,2010 -Transit,1,2.77596,,235.0,2010 -Transit,2,10.338523,,90.0,2010 -Radial Velocity,2,5584.0,3.4,90.0,2010 -Transit,1,5.508023,,166.0,2010 -Transit,1,4.008778,,215.0,2010 -Transit,1,2.875317,,70.0,2010 -Transit,1,4.124481,,254.0,2010 -Transit,1,3.21222,,82.0,2010 -Transit,1,1.212884,,393.0,2010 -Transit,1,3.35524,,396.0,2010 -Transit,1,3.652836,,297.0,2010 -Transit,1,4.234516,,134.0,2010 -Transit,1,3.039577,,204.0,2011 -Transit,1,3.257215,,395.0,2011 -Transit,1,5.723186,,322.0,2011 -Transit,1,2.810595,,193.0,2011 -Transit,2,5.005425,,354.0,2011 -Radial Velocity,2,1022.0,3.4,354.0,2011 -Transit,1,2.150008,,283.0,2011 -Transit,1,3.474474,,387.0,2011 -Transit,1,5.452654,,257.0,2011 -Transit,1,3.646706,,535.0,2011 -Transit,1,1.327347,,317.0,2011 -Transit,1,2.797436,,411.0,2011 -Transit,1,4.640382,,249.0,2012 -Transit,1,3.54387,,642.0,2012 -Transit,1,4.457243,,501.0,2012 -Transit,1,2.694047,,344.0,2012 -Transit,1,4.641878,,447.0,2012 -Transit,1,3.332687,,542.0,2012 -Transit,1,2.691548,,322.0,2014 -Transit,1,3.446459,,303.0,2012 -Transit,1,1.354133,,360.0,2013 -Transit,1,3.547851,,453.0,2013 -Radial Velocity,2,349.7,1.25,25.64,2001 -Radial Velocity,2,6005.0,5.3,25.64,2012 -Radial Velocity,2,5.7727,0.024,23.44,2009 -Radial Velocity,2,13.505,0.0186,23.44,2011 -Radial Velocity,1,431.8,3.1,167.5,2011 -Radial Velocity,1,533.0,6.1,7.01,2010 -Radial Velocity,1,1183.0,4.9,89.85,2002 -Radial Velocity,1,3.4442,0.48,53.71,2005 -Radial Velocity,1,311.6,1.6,115.21,2013 -Radial Velocity,1,62.218,0.229,11.11,2003 -Radial Velocity,1,526.62,1.56,44.05,2007 -Radial Velocity,1,829.0,0.8,32.7,2001 -Radial Velocity,1,15.609,0.0405,21.85,2005 -Radial Velocity,1,431.88,2.07,77.82,2001 -Radial Velocity,1,356.0,2.3,131.41,2009 -Radial Velocity,2,360.2,2.37,56.5,2012 -Radial Velocity,2,2732.0,2.37,56.5,2012 -Radial Velocity,1,675.0,1.94,100.0,2007 -Radial Velocity,1,777.0,1.96,53.28,2009 -Radial Velocity,1,792.6,,58.17,2012 -Radial Velocity,1,177.11,7.6,150.6,2011 -Radial Velocity,1,22.09,0.48,40.32,2003 -Radial Velocity,1,2496.0,1.65,55.01,2009 -Radial Velocity,1,615.0,0.29,35.88,2011 -Radial Velocity,2,1275.0,1.11,38.52,2011 -Radial Velocity,2,4046.0,2.0,38.52,2011 -Radial Velocity,1,5.3978,0.029,16.82,2008 -Radial Velocity,1,1313.0,0.63,56.34,2010 -Radial Velocity,1,227.0,1.8,44.15,2002 -Radial Velocity,1,1634.0,14.2,38.26,2009 -Radial Velocity,2,30.052,0.7,52.85,2009 -Radial Velocity,2,192.9,1.82,52.85,2009 -Radial Velocity,6,5.75962,0.041,39.39,2010 -Radial Velocity,6,16.3567,0.038,39.39,2010 -Radial Velocity,6,49.747,0.08,39.39,2010 -Radial Velocity,6,122.72,0.074,39.39,2010 -Radial Velocity,6,602.0,0.067,39.39,2010 -Radial Velocity,6,2248.0,0.205,39.39,2010 -Radial Velocity,1,989.2,0.94,17.35,2006 -Radial Velocity,1,1075.2,6.21,32.56,1999 -Radial Velocity,2,1270.2,3.44,53.82,2007 -Radial Velocity,2,170.455,0.82,53.82,2008 -Radial Velocity,1,711.0,6.54,66.49,2005 -Radial Velocity,2,1945.0,0.622,33.98,2005 -Radial Velocity,2,37.91,0.0788,33.98,2008 -Radial Velocity,2,262.709,2.3,37.16,2000 -Radial Velocity,2,1708.0,1.92,37.16,2002 -Radial Velocity,1,471.6,14.0,300.3,2005 -Radial 
Velocity,2,14.182,0.0325,28.6,2011 -Radial Velocity,2,53.832,0.036,28.6,2011 -Radial Velocity,2,19.382,0.865,66.89,2013 -Radial Velocity,2,931.0,5.13,66.89,2013 -Radial Velocity,1,4218.0,1.88,45.52,2009 -Radial Velocity,1,75.523,0.26,35.91,2000 -Radial Velocity,1,17.24,0.0696,25.54,2008 -Radial Velocity,1,990.0,4.4,59.84,2009 -Radial Velocity,1,465.1,14.3,50.18,2009 -Radial Velocity,1,359.9,4.6,,2007 -Radial Velocity,1,21.21663,,78.25,2007 -Radial Velocity,1,772.0,2.7,127.88,2011 -Radial Velocity,1,466.2,1.37,22.38,2003 -Radial Velocity,2,11.849,0.0378,43.08,2011 -Radial Velocity,2,33.823,0.0422,43.08,2011 -Radial Velocity,1,500.0,1.07,27.13,2002 -Radial Velocity,3,18.315,0.0085,6.06,2011 -Radial Velocity,3,40.114,0.00755,6.06,2011 -Radial Velocity,3,90.309,0.0151,6.06,2011 -Radial Velocity,2,29.15,0.0379,35.89,2011 -Radial Velocity,2,85.131,0.0496,35.89,2011 -Radial Velocity,1,591.9,1.9,36.02,2006 -Radial Velocity,1,380.85,1.99,44.5,2008 -Radial Velocity,2,22.656,0.0322,32.31,2011 -Radial Velocity,2,53.881,0.06472,32.31,2011 -Radial Velocity,1,1214.0,1.5,89.13,2006 -Radial Velocity,1,738.459,2.61,34.6,2001 -Radial Velocity,1,528.07,13.65,31.79,2011 -Radial Velocity,1,1561.0,7.71,51.97,2003 -Radial Velocity,1,3668.0,4.01,46.51,2006 -Radial Velocity,1,1845.0,0.95,56.05,2010 -Radial Velocity,1,423.841,1.28,18.24,2000 -Radial Velocity,1,2208.0,1.45,44.54,2012 -Radial Velocity,1,17.991,0.62,42.37,2005 -Radial Velocity,1,1117.0,1.16,56.18,2010 -Radial Velocity,1,385.9,5.59,39.56,2001 -Radial Velocity,1,387.1,1.7,168.92,2011 -Radial Velocity,1,2819.654,9.17,54.71,2002 -Radial Velocity,1,1159.2,1.373,26.5,2009 -Radial Velocity,1,912.0,1.8,121.07,2011 -Radial Velocity,1,466.0,0.5,53.82,2010 -Radial Velocity,3,16.546,0.0363,38.01,2011 -Radial Velocity,3,51.284,0.0498,38.01,2011 -Radial Velocity,3,274.49,0.0519,38.01,2011 -Radial Velocity,1,326.6,1.3,136.8,2011 -Radial Velocity,1,18.179,0.33,86.88,2006 -Radial Velocity,1,157.54,3.04,117.37,2009 -Radial Velocity,1,1049.0,0.79,45.01,2009 -Radial Velocity,1,388.0,9.1,20.98,2005 -Radial Velocity,1,363.2,,37.78,2010 -Radial Velocity,3,154.46,0.61,33.24,2002 -Radial Velocity,3,2295.0,0.683,33.24,2002 -Radial Velocity,3,843.6,0.624,33.24,2005 -Radial Velocity,1,2063.818,10.35,18.21,2001 -Radial Velocity,2,55.0,2.3,42.88,2004 -Radial Velocity,2,2720.0,3.366,42.88,2012 -Radial Velocity,3,5.6363,0.0117,25.59,2011 -Radial Velocity,3,14.025,0.0187,25.59,2011 -Radial Velocity,3,33.941,0.0162,25.59,2011 -Radial Velocity,2,14.3098,0.839,42.43,2000 -Radial Velocity,2,2140.2,13.38,42.43,2000 -Radial Velocity,1,696.3,10.7,99.4,2009 -Radial Velocity,1,407.15,0.0961,15.56,2011 -Radial Velocity,6,4.3123,0.0126,12.83,2008 -Radial Velocity,6,9.6184,0.0208,12.83,2008 -Radial Velocity,6,20.432,0.0299,12.83,2008 -Radial Velocity,6,34.62,0.011,12.83,2012 -Radial Velocity,6,51.76,0.0164,12.83,2012 -Radial Velocity,6,197.8,0.0223,12.83,2012 -Radial Velocity,1,264.15,4.01,33.33,2002 -Radial Velocity,1,963.0,2.54,43.03,2004 -Radial Velocity,1,1.3283,18.37,43.03,2003 -Radial Velocity,2,18.357,0.039,52.03,2013 -Radial Velocity,2,25.648,0.027,52.03,2013 -Radial Velocity,1,327.8,0.6,54.94,2010 -Radial Velocity,1,2371.0,25.0,37.05,2008 -Radial Velocity,1,36.96,2.49,93.2,2007 -Radial Velocity,1,472.0,0.58,50.43,2010 -Radial Velocity,1,5.8872,0.04,22.04,2011 -Radial Velocity,2,226.93,0.1872,32.58,2008 -Radial Velocity,2,342.85,0.6579,32.58,2008 -Radial Velocity,1,890.76,1.79,48.95,2004 -Radial Velocity,1,43.6,0.47,36.14,2008 -Radial Velocity,1,3.024,0.249,33.41,2000 -Radial 
Velocity,2,4.0845,0.07167000000000001,37.84,2008 -Radial Velocity,2,1353.6,0.35061,37.84,2008 -Radial Velocity,2,430.0,5.0,121.36,2002 -Radial Velocity,2,2500.0,7.0,121.36,2008 -Radial Velocity,1,700.0,1.16,87.41,2008 -Radial Velocity,1,4.9437,0.115,40.73,2002 -Radial Velocity,1,2582.7,1.71,47.26,2005 -Radial Velocity,1,1279.0,4.9,31.03,2002 -Radial Velocity,2,14.07,0.0413,34.07,2011 -Radial Velocity,2,95.415,0.0565,34.07,2011 -Radial Velocity,1,118.96,1.13,28.07,2000 -Radial Velocity,1,303.0,5.25,92.51,2003 -Radial Velocity,2,201.83,3.1548,25.7,2008 -Radial Velocity,2,607.06,7.4634,25.7,2008 -Radial Velocity,1,2.817822,0.38,35.8,2005 -Radial Velocity,1,589.64,2.9,10.34,2006 -Radial Velocity,1,358.0,0.64,32.62,2009 -Radial Velocity,2,572.4,1.26,35.59,2003 -Radial Velocity,2,152.6,0.17,35.59,2011 -Radial Velocity,1,480.5,6.0,80.06,2012 -Radial Velocity,1,1973.0,2.82,55.04,2005 -Radial Velocity,1,6.276,1.9,58.82,2001 -Radial Velocity,3,8.667,0.033,12.58,2006 -Radial Velocity,3,31.56,0.038,12.58,2006 -Radial Velocity,3,197.0,0.058,12.58,2006 -Radial Velocity,1,851.8,6.1,,2007 -Radial Velocity,1,2231.0,2.0,28.76,2003 -Radial Velocity,1,3383.0,3.15,51.36,2002 -Radial Velocity,1,1260.0,3.06,53.05,2008 -Radial Velocity,1,2.54858,1.87,36.52,2003 -Radial Velocity,2,188.9,2.25,94.61,2002 -Radial Velocity,2,379.1,2.25,94.61,2005 -Radial Velocity,1,1800.0,1.15,96.99,2008 -Radial Velocity,3,51.645,1.8,64.56,2003 -Radial Velocity,3,2473.0,8.06,64.56,2003 -Radial Velocity,3,346.6,0.396,64.56,2007 -Radial Velocity,1,3.51,0.42,28.94,1999 -Radial Velocity,1,418.2,2.51,80.58,2007 -Radial Velocity,1,3.971,0.197,59.7,2002 -Radial Velocity,1,5.7361,,41.27,2012 -Radial Velocity,1,1966.1,1.34,48.64,2011 -Radial Velocity,1,111.4357,,29.04,2001 -Radial Velocity,1,1001.7,6.86,32.56,2005 -Radial Velocity,1,184.02,2.7,88.26,2007 -Radial Velocity,2,441.47,,27.46,2003 -Radial Velocity,2,220.078,,27.46,2003 -Radial Velocity,1,705.0,1.3,112.23,2011 -Radial Velocity,1,2.985625,0.4,43.53,2002 -Radial Velocity,1,788.0,0.132,33.96,2009 -Radial Velocity,1,58.43,0.01133,11.15,2011 -Radial Velocity,1,2.1375,1.5,91.16,2006 -Radial Velocity,1,1695.0,0.92,42.48,2009 -Radial Velocity,1,1475.0,7.0,72.57,2009 -Radial Velocity,1,2754.0,1.78,18.06,2009 -Radial Velocity,1,3.416,0.22,74.46,2004 -Radial Velocity,1,2157.0,1.78,30.88,2009 -Radial Velocity,1,256.78,8.44,38.99,1999 -Radial Velocity,1,49.77,0.057,22.09,2009 -Radial Velocity,1,325.81,3.86,32.32,2000 -Radial Velocity,1,143.58,0.37,28.9,2005 -Radial Velocity,2,13.186,0.0263,42.52,2011 -Radial Velocity,2,46.025,0.0318,42.52,2011 -Imaging,1,,,91.57,2013 -Radial Velocity,1,507.0,1.2,149.03,2009 -Radial Velocity,1,361.1,0.9,132.8,2011 -Radial Velocity,1,498.9,0.68,84.03,2009 -Radial Velocity,1,647.3,4.0,221.24,2011 -Radial Velocity,2,8.1256,0.0284,26.21,2011 -Radial Velocity,2,103.49,0.04,26.21,2011 -Radial Velocity,1,9.494,0.026,21.3,2010 -Radial Velocity,1,436.9,1.8,150.38,2011 -Radial Velocity,1,4951.0,6.8,42.77,2012 -Radial Velocity,1,439.3,0.502,60.46,2005 -Radial Velocity,2,17.054,0.087,17.99,2004 -Radial Velocity,2,4970.0,0.36,17.99,2010 -Radial Velocity,1,868.0,1.4,130.89,2011 -Radial Velocity,1,157.57,1.7,140.85,2011 -Radial Velocity,1,383.7,1.16,52.8,2007 -Radial Velocity,1,70.46,0.3,30.5,2005 -Radial Velocity,1,20.8133,0.172,42.0,2005 -Radial Velocity,1,4.113775,0.45,28.98,2006 -Radial Velocity,2,127.58,5.9,164.2,2008 -Radial Velocity,2,520.0,2.6,164.2,2008 -Radial Velocity,1,122.1,0.05,9.24,2010 -Radial Velocity,1,778.1,5.9,138.5,2011 -Radial 
Velocity,1,6.495,0.96,121.07,2010 -Radial Velocity,1,47.84,0.098,49.33,2009 -Radial Velocity,1,5.8881,0.367,53.08,2013 -Radial Velocity,1,55.806,0.186,20.82,2009 -Radial Velocity,1,199.505,8.3,102.04,2003 -Radial Velocity,1,1531.0,6.92,37.44,2002 -Radial Velocity,1,2890.0,11.0,87.87,2011 -Radial Velocity,1,3630.0,9.61,36.36,2012 -Imaging,1,,,91.83,2013 -Radial Velocity,1,48.056,0.21,51.26,2005 -Radial Velocity,1,10.8985,0.261,38.56,2002 -Radial Velocity,1,443.4,2.6,138.5,2011 -Radial Velocity,2,395.8,1.29,68.54,2002 -Radial Velocity,2,1624.0,0.99,68.54,2005 -Radial Velocity,1,68.27,0.77,65.62,2010 -Radial Velocity,2,7.8543,0.054,56.92,2013 -Radial Velocity,2,30.93,0.076,56.92,2013 -Radial Velocity,1,5.24,0.28,59.03,2005 -Radial Velocity,1,835.477,11.09,97.66,2009 -Radial Velocity,1,1143.0,6.8,28.88,2003 -Radial Velocity,1,324.0,2.83,37.42,2013 -Radial Velocity,2,263.3,0.27,15.71,2010 -Radial Velocity,2,1657.0,0.71,15.71,2010 -Radial Velocity,2,937.7,1.24,28.04,2003 -Radial Velocity,2,1046.0,,28.04,2011 -Radial Velocity,1,3827.0,0.48,20.48,2014 -Radial Velocity,1,83.888,11.68,40.57,1989 -Radial Velocity,1,493.7,1.1,20.43,2001 -Radial Velocity,1,1114.0,0.95,35.0,2002 -Radial Velocity,1,670.0,2.1,110.62,2011 -Radial Velocity,1,2597.0,1.88,33.01,2004 -Radial Velocity,1,25.827,0.178,38.02,2004 -Radial Velocity,1,6.1335,2.13,88.57,2005 -Radial Velocity,1,2082.0,4.5,97.66,2013 -Radial Velocity,1,63.33,1.22,44.37,2003 -Radial Velocity,1,344.95,3.71,133.16,2003 -Radial Velocity,3,559.4,3.0,52.83,2007 -Radial Velocity,3,4.1547,0.058,52.83,2009 -Radial Velocity,3,3008.0,7.2,52.83,2009 -Radial Velocity,1,9.6737,0.041,27.45,2009 -Radial Velocity,1,1244.0,0.38,68.35,2009 -Radial Velocity,1,948.12,0.224,38.05,2011 -Radial Velocity,2,454.2,1.45,16.57,2002 -Radial Velocity,2,923.8,3.24,16.57,2005 -Radial Velocity,1,1840.0,1.6,67.61,2009 -Radial Velocity,1,10.7085,1.04,29.76,1999 -Radial Velocity,1,883.0,2.2,121.36,2011 -Radial Velocity,1,1951.0,18.15,57.21,2008 -Radial Velocity,1,974.0,5.61,70.97,2007 -Radial Velocity,1,1544.0,1.49,98.52,2011 -Radial Velocity,2,3.27,0.0351,24.15,2011 -Radial Velocity,2,1160.9,0.1507,24.15,2011 -Radial Velocity,2,258.19,1.59,25.65,1999 -Radial Velocity,2,5000.0,0.82,25.65,2009 -Radial Velocity,3,12.083,0.0292,26.91,2011 -Radial Velocity,3,59.519,0.0382,26.91,2011 -Radial Velocity,3,459.26,0.121,26.91,2011 -Radial Velocity,1,464.3,2.0,106.38,2009 -Radial Velocity,3,11.577,0.0166,14.56,2011 -Radial Velocity,3,27.582,0.0358,14.56,2011 -Radial Velocity,3,106.72,0.03,14.56,2011 -Radial Velocity,1,330.0,0.223,38.45,2011 -Radial Velocity,1,1125.7,9.76,121.36,2008 -Radial Velocity,1,653.22,9.7,33.46,2002 -Radial Velocity,1,1928.0,5.1,35.87,2005 -Radial Velocity,1,1299.0,1.9,106.38,2011 -Radial Velocity,1,386.3,1.62,34.57,2003 -Radial Velocity,1,1057.0,3.12,59.35,2008 -Radial Velocity,1,176.3,2.9,126.1,2010 -Radial Velocity,1,103.95,5.76,55.19,2008 -Radial Velocity,2,44.236,2.12,42.68,2009 -Radial Velocity,2,1008.0,6.56,42.68,2009 -Radial Velocity,1,528.4,1.21,12.87,2003 -Radial Velocity,1,1027.0,0.85,53.05,2009 -Radial Velocity,1,331.5,0.96,59.28,2009 -Radial Velocity,1,2.8758911,,78.86,2005 -Radial Velocity,1,4.072,1.33,63.49,2005 -Radial Velocity,1,2.391,15.5,76.51,2009 -Radial Velocity,1,1096.2,0.168,29.55,2011 -Radial Velocity,1,2097.0,3.0,85.18,2009 -Radial Velocity,1,689.0,1.5,182.82,2011 -Radial Velocity,1,499.4,2.73,51.87,2008 -Radial Velocity,1,18.596,0.0193,18.08,2011 -Radial Velocity,1,3342.0,0.947,18.06,2006 -Radial Velocity,1,163.9,5.02,65.79,2008 -Radial 
Velocity,2,408.6,2.24,68.54,2004 -Radial Velocity,2,3452.0,2.58,68.54,2014 -Radial Velocity,2,194.3,0.85,43.4,2007 -Radial Velocity,2,391.9,0.82,43.4,2007 -Radial Velocity,1,131.05,9.71,35.37,2011 -Radial Velocity,1,842.0,0.75,55.1,2009 -Radial Velocity,1,4.6455,0.013,24.05,2010 -Radial Velocity,1,359.5546,10.57,49.0,2007 -Radial Velocity,1,104.84,0.12,33.48,2011 -Radial Velocity,1,521.0,1.8,114.15,2011 -Radial Velocity,2,12.62,1.13,68.59,2013 -Radial Velocity,2,248.4,1.9,68.59,2013 -Radial Velocity,2,1178.4,2.1,52.72,2006 -Radial Velocity,2,352.3,0.73,52.72,2012 -Radial Velocity,4,643.25,,15.28,2000 -Radial Velocity,4,4205.8,1.814,15.28,2004 -Radial Velocity,4,9.6386,,15.28,2004 -Radial Velocity,4,310.55,,15.28,2006 -Radial Velocity,1,8.428198,14.4,31.26,2002 -Radial Velocity,2,75.29,0.77,69.44,2010 -Radial Velocity,2,1314.0,2.29,69.44,2010 -Radial Velocity,1,282.4,0.48,51.81,2010 -Radial Velocity,1,606.4,2.7,37.98,2009 -Radial Velocity,1,1155.0,0.36,21.92,2005 -Radial Velocity,1,5144.0,3.53,42.99,2012 -Radial Velocity,1,420.77,1.7,50.0,2007 -Radial Velocity,2,58.11289,8.02,37.88,1998 -Radial Velocity,2,1749.5,18.1,37.88,2000 -Radial Velocity,1,6.403,0.23,43.12,2002 -Radial Velocity,2,225.62,2.88,36.32,2000 -Radial Velocity,2,2102.0,4.04,36.32,2003 -Radial Velocity,1,1145.0,0.67,64.98,2007 -Radial Velocity,1,538.0,1.83,,2007 -Radial Velocity,1,1523.0,2.6,44.05,2009 -Radial Velocity,1,323.6,2.7,134.95,2008 -Radial Velocity,1,1290.0,7.8,67.02,2009 -Radial Velocity,1,297.3,0.61,127.55,2007 -Astrometry,1,1016.0,,14.98,2010 -Radial Velocity,2,406.6,1.49,59.03,1999 -Radial Velocity,2,110.9,0.15,59.03,2010 -Radial Velocity,1,71.484,7.03,46.73,2001 -Radial Velocity,1,14.476,0.08,63.69,2008 -Radial Velocity,1,3.0925,0.95,27.05,2000 -Radial Velocity,1,396.03,,131.75,2010 -Radial Velocity,1,479.0,1.6,114.55,2009 -Radial Velocity,1,663.0,3.3,115.47,2009 -Radial Velocity,3,9.3743,0.0238,26.15,2008 -Radial Velocity,3,962.0,0.64,26.15,2008 -Radial Velocity,3,2172.0,0.54,26.15,2008 -Radial Velocity,1,956.0,0.37,55.93,2009 -Radial Velocity,2,634.23,3.69,52.83,2004 -Radial Velocity,2,2950.0,3.82,52.83,2008 -Radial Velocity,1,6.838,0.94,47.37,2006 -Radial Velocity,1,986.0,0.75,44.98,2006 -Radial Velocity,2,3.097,0.52,47.92,1998 -Radial Velocity,2,3810.0,1.99,47.92,1999 -Radial Velocity,1,456.46,1.26,52.63,2004 -Radial Velocity,1,14.275,0.0316,17.72,2011 -Radial Velocity,1,2.21857578,,19.25,2005 -Radial Velocity,1,1136.1,5.93,62.11,2002 -Radial Velocity,2,2891.0,1.502,15.89,2003 -Radial Velocity,2,17.1,0.057,15.89,2005 -Radial Velocity,1,1038.1,1.9,54.23,2007 -Radial Velocity,1,4885.0,3.1,189.39,2009 -Radial Velocity,1,24.348,0.72,19.89,1999 -Radial Velocity,2,74.72,0.05318,8.82,2011 -Radial Velocity,2,525.8,0.07552,8.82,2011 -Radial Velocity,1,351.5,2.5,67.39,2007 -Radial Velocity,1,18.20163,3.7,37.35,1999 -Radial Velocity,1,1289.0,3.0,46.93,2002 -Radial Velocity,1,3638.0,6.9,43.57,2012 -Radial Velocity,1,1333.0,2.58,32.99,2007 -Radial Velocity,1,1035.7,0.79,32.83,2011 -Radial Velocity,2,613.8,1.85,68.35,2010 -Radial Velocity,2,825.0,0.895,68.35,2010 -Radial Velocity,2,255.87,17.4,46.34,2002 -Radial Velocity,2,1383.4,2.44,46.34,2004 -Imaging,1,,,40.85,2006 -Radial Velocity,3,1931.0,4.05,47.3,2009 -Radial Velocity,3,34.873,0.054,47.3,2011 -Radial Velocity,3,2831.6,1.68,47.3,2012 -Radial Velocity,1,1733.0,0.266,26.95,2011 -Radial Velocity,1,279.8,1.37,90.33,2008 -Radial Velocity,1,610.0,2.2,170.07,2009 -Radial Velocity,2,161.97,,55.31,2012 -Radial Velocity,2,1155.7,,55.31,2012 -Radial 
Velocity,1,123.0,0.45,43.99,2004 -Radial Velocity,1,875.5,9.9,352.11,2012 -Radial Velocity,1,3.52474859,,47.08,1999 -Radial Velocity,1,442.1,1.23,21.29,1998 -Radial Velocity,1,354.8,,55.93,2007 -Radial Velocity,1,2.245715,0.45,52.72,2005 -Radial Velocity,1,373.3,2.3,121.8,2009 -Radial Velocity,1,951.0,4.5,40.75,2001 -Radial Velocity,2,7.2825,0.0087,21.52,2011 -Radial Velocity,2,10.866,0.0097,21.52,2011 -Radial Velocity,2,191.99,0.101,38.64,2011 -Radial Velocity,2,2277.0,0.246,38.64,2011 -Radial Velocity,2,3.93,0.017,43.53,2009 -Radial Velocity,2,567.0,0.33,43.53,2009 -Radial Velocity,1,1311.0,1.26,33.29,2002 -Radial Velocity,1,1294.0,2.1,26.52,2002 -Radial Velocity,1,118.45,0.65,37.89,2003 -Radial Velocity,2,7.126816,1.39,19.72,1998 -Radial Velocity,2,4270.0,2.6,19.72,2005 -Radial Velocity,1,1319.0,13.0,54.92,2010 -Radial Velocity,1,225.7,0.21,29.94,2010 -Radial Velocity,1,5501.0,10.39,29.2,2012 -Radial Velocity,1,2093.3,,,2012 -Radial Velocity,1,3.8335,0.06,81.1,2007 -Radial Velocity,1,672.1,11.1,289.02,2012 -Radial Velocity,1,2209.0,1.06,45.23,2012 -Radial Velocity,1,3724.7,1.45,48.43,2011 -Radial Velocity,1,456.1,3.09,52.88,2007 -Radial Velocity,1,3999.0,1.9,50.45,2011 -Radial Velocity,1,572.38,7.75,41.95,1999 -Radial Velocity,1,26.73,0.71,94.07,2006 -Radial Velocity,1,141.6,1.08,108.46,2007 -Radial Velocity,1,192.0,6.575,,2013 -Radial Velocity,1,501.75,6.9,,2009 -Radial Velocity,1,745.7,5.3,307.69,2011 -Radial Velocity,1,2443.0,2.54,54.92,2009 -Radial Velocity,1,8.7836,0.0265,9.42,2008 -Radial Velocity,1,3.369,0.76,50.2,2004 -Radial Velocity,2,345.72,1.42,44.8,2009 -Radial Velocity,2,9017.8,,44.8,2011 -Radial Velocity,1,57.0,0.47,23.8,2010 -Radial Velocity,1,16.2,1.25,223.21,2010 -Radial Velocity,3,6.673855,3.88,52.88,2005 -Radial Velocity,3,147.73,1.28,52.88,2006 -Radial Velocity,3,952.0,0.57,52.88,2009 -Radial Velocity,1,41.397,0.298,11.03,2010 -Radial Velocity,3,8.1352,0.036,25.87,2011 -Radial Velocity,3,32.03,0.41,25.87,2011 -Radial Velocity,3,431.7,0.527,25.87,2011 -Imaging,1,,,11.43,2010 -Radial Velocity,1,124.6,9.18,149.25,2013 -Radial Velocity,1,17337.5,9.0,23.98,2009 -Radial Velocity,1,511.098,8.82,31.33,2002 -Imaging,1,,,131.93,2010 -Radial Velocity,1,111.7,2.1,14.9,2009 -Radial Velocity,1,5.0505,1.068,44.46,2013 -Radial Velocity,1,311.288,1.94,17.24,1999 -Imaging,4,170000.0,,39.94,2008 -Imaging,4,69000.0,,39.94,2008 -Imaging,4,37000.0,,39.94,2008 -Imaging,4,18000.0,,39.94,2010 -Transit,1,1.217514,,262.0,2012 -Transit,1,4.1137912,,124.22,2012 -Transit,1,2.7033904,1.47,178.0,2013 -Transit,1,7.8457,,222.0,2014 -Transit,1,2.47063,,213.0,2006 -Transit,1,2.204737,,320.0,2008 -Transit,1,4.8878162,,38.0,2008 -Transit,1,3.21346,,550.0,2009 -Transit,1,3.54846,,,2009 -Transit,1,3.234723,,,2009 -Transit,1,4.885525,,,2009 -Transit,1,3.52254,,1330.0,2010 -Transit,3,19.24,,650.0,2010 -Transit,3,38.91,,650.0,2010 -Transit,3,1.592851,,650.0,2010 -Transit,2,0.837495,,173.0,2011 -Transit,2,45.29485,,173.0,2011 -Transit,6,10.3039,,613.0,2010 -Transit,6,13.0241,,613.0,2010 -Transit,6,22.6845,,613.0,2010 -Transit,6,31.9996,,613.0,2010 -Transit,6,46.6888,,613.0,2010 -Transit,6,118.3807,,613.0,2010 -Transit,1,4.4379637,,600.0,2011 -Transit,1,1.7635892,,,2011 -Transit,1,6.790123,,980.0,2011 -Transit,1,4.942782,,,2011 -Transit,1,228.776,,61.0,2011 -Transit,1,1.4857108,,800.0,2011 -Transit,3,3.504725,,,2011 -Transit,3,7.64159,,,2011 -Transit,3,14.85888,,,2011 -Transit,2,9.2869944,,2119.0,2011 -Transit Timing Variations,2,160.0,,2119.0,2011 -Transit,5,3.6961219,,290.0,2011 
-Transit,5,10.854092,,290.0,2011 -Transit,5,77.61184,,290.0,2011 -Transit,5,6.098493,,290.0,2011 -Transit,5,19.57706,,290.0,2011 -Transit,1,2.785755,,108.0,2011 -Transit,1,289.8623,,190.0,2011 -Transit,2,7.1073,,800.0,2011 -Transit,2,10.7421,,800.0,2011 -Transit,2,8.1453,,1200.0,2011 -Transit,2,12.3335,,1200.0,2011 -Transit,3,6.2385,,,2011 -Transit,3,12.7204,,,2011 -Radial Velocity,3,123.0,,,2014 -Transit,2,12.2829,,,2011 -Transit,2,17.2513,,,2011 -Transit,2,15.3348,,,2011 -Transit,2,31.3309,,,2011 -Transit,2,5.9123,,,2011 -Transit,2,8.9858,,,2011 -Transit,2,10.3376,,1400.0,2011 -Transit,2,13.2907,,1400.0,2011 -Transit,3,29.33434,,1400.0,2012 -Transit,3,60.323105,,1400.0,2012 -Transit,3,143.34394,,1400.0,2012 -Transit,2,20.8613,,2100.0,2011 -Transit,2,42.6318,,2100.0,2011 -Transit,5,5.90124,,303.0,2011 -Transit,5,8.7522,,303.0,2011 -Transit,5,22.7802,,303.0,2012 -Transit,5,2.896,,303.0,2012 -Transit,5,0.74296,,303.0,2012 -Transit,5,5.66793,,,2011 -Transit,5,13.17562,,,2011 -Transit,5,21.77596,,,2011 -Transit,5,31.7844,,,2011 -Transit,5,41.02902,,,2011 -Transit,1,288.822,,1499.0,2011 -Transit,1,131.458,,1645.0,2011 -Transit,2,13.83989,,470.0,2012 -Transit,2,16.23855,,470.0,2012 -Transit,3,13.367308,,66.0,2013 -Transit,3,21.301886,,66.0,2013 -Transit,3,39.792187,,66.0,2013 -Transit,1,105.599,,600.0,2012 -Transit,1,21.0874,,1200.0,2011 -Transit,1,6.87349,,2700.0,2010 -Transit,1,1.855558,,770.0,2011 -Transit,3,1.2137672,,38.7,2011 -Transit,3,0.45328509,,38.7,2011 -Transit,3,1.865169,,38.7,2011 -Transit,1,3.024095,,1950.0,2011 -Transit,1,3.24674,,2250.0,2011 -Transit,1,2.455239,,333.0,2011 -Transit,2,33.60134,,855.0,2012 -Transit Timing Variations,2,57.011,,855.0,2012 -Transit,2,49.532,,1500.0,2012 -Transit,2,303.137,,1500.0,2012 -Transit,4,4.7779803,,,2012 -Transit,4,9.6739283,,,2012 -Transit,4,42.8961,,,2014 -Radial Velocity,4,982.0,,,2014 -Transit,2,7.2037945,,,2012 -Transit,2,10.9129343,,,2012 -Transit,2,7.81254,,,2012 -Transit,2,9.37647,,,2012 -Transit,3,45.154,,,2012 -Transit,3,85.312,,,2012 -Transit Timing Variations,3,,,,2014 -Transit,2,7.8773565,,,2012 -Transit,2,16.3850021,,,2012 -Transit,2,18.6489525,,,2012 -Transit,2,38.5583038,,,2012 -Transit,2,8.0109434,,,2012 -Transit,2,12.0717249,,,2012 -Transit,2,27.9481449,,,2012 -Transit,2,42.1516418,,,2012 -Transit,2,10.5016,,,2012 -Transit,2,21.40239,,,2012 -Transit,2,5.7293196,,,2012 -Transit,2,11.6092567,,,2012 -Transit,2,10.2184954,,,2012 -Transit,2,15.5741568,,,2012 -Transit,2,11.8681707,,,2012 -Transit,2,17.9801235,,,2012 -Transit,3,7.1316185,,,2012 -Transit,3,8.9193459,,,2012 -Transit,3,11.9016171,,,2012 -Transit,1,59.87756,,,2013 -Transit,5,5.714932,,368.0,2013 -Transit,5,12.4417,,368.0,2013 -Transit,5,18.16406,,368.0,2013 -Transit,5,122.3874,,368.0,2013 -Transit,5,267.291,,368.0,2013 -Transit,1,9.4341505,,200.0,2013 -Transit,1,138.317,,1000.0,2012 -Transit,3,2.15491,,,2012 -Transit,3,5.859944,,,2012 -Transit,3,8.13123,,,2012 -Transit,1,17.815815,,1107.0,2013 -Transit,1,15.7259,,1107.0,2013 -Transit,3,5.398763,,135.0,2012 -Transit,3,9.605085,,135.0,2012 -Radial Velocity,3,580.0,0.947,135.0,2012 -Transit,2,13.722341,,,2013 -Transit,2,242.4613,,,2013 -Orbital Brightness Modulation,2,0.240104,,1180.0,2011 -Orbital Brightness Modulation,2,0.342887,,1180.0,2011 -Transit,1,3.90512,,800.0,2010 -Transit,1,7.340718,,1330.0,2013 -Transit,1,8.884924,,1140.0,2013 -Orbital Brightness Modulation,1,1.54492875,,,2013 -Transit,1,3.57878087,,570.0,2013 -Transit,1,0.355,,,2013 -Transit,2,13.485,,,2012 -Transit,2,27.402,,,2012 -Transit,2,7.053,,,2012 
-Transit,2,9.522,,,2012 -Transit,2,5.955,,,2012 -Transit,2,12.04,,,2012 -Transit,2,26.444,,,2012 -Transit,2,51.538,,,2012 -Transit,2,9.77,,,2012 -Transit,2,20.09,,,2012 -Transit,2,8.726,,,2012 -Transit,2,12.883,,,2012 -Transit,2,8.306,,,2012 -Transit,2,12.513,,,2012 -Transit,1,282.5255,,,2013 -Transit,2,114.73635,,,2013 -Transit,2,191.2318,,,2013 -Transit,2,10.95416,,339.0,2013 -Transit Timing Variations,2,22.3395,,339.0,2013 -Transit,4,3.743208,,,2013 -Transit,4,10.423648,,,2013 -Transit,4,22.342989,,,2013 -Transit,4,54.32031,,,2013 -Transit,7,7.008151,,780.0,2013 -Transit,7,8.719375,,780.0,2013 -Transit,7,59.73667,,780.0,2013 -Transit,7,91.93913,,780.0,2013 -Transit,7,124.9144,,780.0,2013 -Transit,7,210.60697,,780.0,2013 -Transit,7,331.60059,,780.0,2013 -Transit,1,6.24658,,1030.0,2013 -Transit,2,13.749,,,2013 -Transit,2,26.723,,,2013 -Transit,2,4.72674,,,2014 -Radial Velocity,2,1460.0,,,2014 -Transit,2,2.50806,,,2014 -Radial Velocity,2,820.3,,,2014 -Transit,1,11.5231,,,2014 -Transit,1,16.2385,,,2014 -Transit,2,2.58664,,,2014 -Radial Velocity,2,789.0,,,2014 -Transit,1,1.54168,,,2014 -Transit,1,4.60358,,,2014 -Transit,3,6.88705,,,2014 -Transit,3,12.8159,,,2014 -Transit,3,35.3331,,,2014 -Transit,5,5.28696,,,2014 -Transit,5,7.07142,,,2014 -Transit,5,10.3117,,,2014 -Transit,5,16.1457,,,2013 -Transit,5,27.4536,,,2014 -Transit,2,15.9654,,,2014 -Transit,2,179.612,,,2014 -Transit,2,5.4122,,,2013 -Transit,4,6.16486,,,2014 -Transit,4,13.5708,,,2014 -Transit,4,23.9802,,,2014 -Transit,4,43.8445,,,2014 -Transit,2,6.48163,,,2014 -Transit,2,21.2227,,,2014 -Transit,2,4.754,,,2014 -Transit,2,8.92507,,,2014 -Transit,2,8.041,,,2013 -Transit,2,11.776,,,2013 -Transit,2,15.09,,,2013 -Transit,2,22.804,,,2013 -Transit,3,27.50868,,,2013 -Transit,2,16.092,,,2014 -Transit,2,25.5169,,,2014 -Transit,2,13.78164,,,2014 -Transit,2,23.08933,,,2014 -Transit,2,22.951,,,2013 -Transit,2,42.882,,,2013 -Transit,2,36.855,,,2013 -Transit,2,49.412,,,2013 -Transit,2,23.654,,,2013 -Transit,2,50.447,,,2013 -Transit,2,31.884,,,2013 -Transit,2,48.648,,,2013 -Transit,2,17.324,,,2013 -Transit,2,33.006,,,2013 -Transit,2,35.736,,,2013 -Transit,2,54.414,,,2013 -Transit,2,24.806,,,2013 -Transit,2,44.347,,,2013 -Transit,2,5.487,,,2013 -Transit,2,8.291,,,2013 -Transit,2,10.416,,,2013 -Transit,2,13.084,,,2013 -Transit,2,34.921,,,2013 -Transit,2,71.312,,,2013 -Transit,2,17.849,,,2013 -Transit,2,26.136,,,2013 -Transit,2,42.994,,,2013 -Transit,2,88.505,,,2013 -Transit,2,2.42629,,,2014 -Transit,2,4.62332,,,2014 -Transit,2,0.66931,,,2014 -Radial Velocity,2,3000.0,,,2014 -Transit,1,2.46502,,,2014 -Transit,1,68.9584,,,2014 -Transit,1,17.833648,,132.0,2013 -Transit,1,3.00516,,,2013 -Transit,1,1.72086123,,1056.0,2014 -Transit,1,66.262,,,2014 -Imaging,1,40000.0,,,2011 -Transit,1,3.91405,,2000.0,2007 -Microlensing,1,,,,2008 -Microlensing,1,,,,2008 -Microlensing,1,,,,2009 -Microlensing,1,,,3600.0,2013 -Microlensing,1,2780.0,,,2011 -Microlensing,1,,,,2010 -Microlensing,1,1970.0,,,2010 -Microlensing,1,,,2300.0,2012 -Microlensing,1,,,2800.0,2012 -Microlensing,1,,,7720.0,2012 -Microlensing,1,,,7560.0,2013 -Radial Velocity,1,677.8,19.8,,2007 -Radial Velocity,1,6.958,0.34,,2014 -Radial Velocity,1,5.118,0.4,,2014 -Radial Velocity,1,121.71,1.54,,2014 -Microlensing,1,,,,2004 -Microlensing,1,3600.0,,,2005 -Microlensing,1,3300.0,,,2006 -Microlensing,1,3500.0,,,2005 -Microlensing,2,1825.0,,,2008 -Microlensing,2,5100.0,,,2008 -Microlensing,1,,,,2009 -Microlensing,1,,,2570.0,2012 -Microlensing,2,,,4080.0,2012 -Microlensing,2,,,4080.0,2012 -Microlensing,1,,,1760.0,2013 
-Microlensing,1,,,4970.0,2013 -Transit,1,2.4855335,,,2008 -Transit,1,3.101278,,,2004 -Transit,1,1.2119189,,,2002 -Transit,1,4.0161,,,2004 -Transit,1,1.4324752,,600.0,2004 -Transit,1,1.689868,,2500.0,2004 -Transit,1,3.9791,,,2007 -Transit,1,3.67724,,,2007 -Imaging,1,730000.0,,,2006 -Transit,1,3.1606296,,1200.0,2013 -Radial Velocity,1,4.4264,,,2012 -Radial Velocity,1,2.1451,,,2012 -Pulsar Timing,3,25.262,,,1992 -Pulsar Timing,3,66.5419,,,1992 -Pulsar Timing,3,98.2114,,,1994 -Pulsar Timing,1,36525.0,,,2003 -Pulsar Timing,1,0.09070629,,1200.0,2011 -Transit,1,1.420033,,,2010 -Transit,1,1.3371182,,,2011 -Imaging,1,,,135.0,2013 -Imaging,1,,,120.0,2013 -Imaging,1,,,,2010 -Transit,1,4.2,,8500.0,2006 -Transit,1,1.796,,8500.0,2006 -Transit,1,3.030065,,152.3,2004 -Transit,1,1.30618581,,228.0,2007 -Transit,1,3.553945,,492.0,2007 -Transit,1,1.4822446,,360.0,2011 -Imaging,1,,,,2008 -Pulsation Timing Variations,1,1170.0,,,2007 -Transit,1,2.5199449,,408.0,2007 -Transit,1,2.152226,,147.0,2007 -Transit,1,1.846835,,220.0,2007 -Transit,1,1.33823187,,300.0,2007 -Transit,1,1.62843142,,300.0,2008 -Transit,1,3.361006,,,2009 -Transit,1,4.954658,,140.0,2008 -Transit,1,8.158715,,87.0,2010 -Transit,1,3.0927616,,90.0,2008 -Transit,1,3.7224747,,121.7,2008 -Transit,1,1.091423,,,2008 -Transit,1,4.353011,,155.0,2009 -Transit,1,2.243752,,160.0,2008 -Transit,1,3.7520656,,,2009 -Transit,1,3.1186009,,,2009 -Transit,1,3.7354417,,400.0,2009 -Transit,1,0.94145299,,105.49,2009 -Transit,1,0.78884,,250.0,2009 -Transit,1,4.322482,,242.0,2010 -Transit,1,3.5327313,,300.0,2010 -Transit,1,2.9444256,,,2010 -Transit,1,2.34121242,,,2010 -Transit,1,3.764825,,,2010 -Transit,1,2.7566004,,250.0,2010 -Transit,1,3.408821,,,2010 -Transit,1,3.922727,,70.0,2010 -Transit,1,3.4059096,,360.0,2010 -Transit,1,2.718659,,,2010 -Transit,1,1.2198669,,116.14,2010 -Transit,1,4.3176782,,120.0,2010 -Transit,1,3.161575,,,2011 -Transit,1,1.5373653,,450.0,2011 -Transit,1,3.577469,,343.0,2010 -Transit,1,6.871815,,110.0,2010 -Transit,1,4.055259,,230.0,2011 -Transit,1,3.052401,,180.0,2010 -Transit,1,4.9816872,,160.0,2012 -Transit,1,0.813475,,80.0,2011 -Transit,1,2.4238039,,,2011 -Transit,1,3.1260876,,,2011 -Transit,1,1.43037,,,2011 -Transit,1,4.1591399,,200.0,2012 -Transit,1,2.143634,,,2011 -Transit,1,2.7817387,,170.0,2012 -Transit,1,1.9550959,,230.0,2011 -Transit,1,1.7497798,,140.0,2012 -Transit,1,3.6936411,,200.0,2012 -Transit,1,4.465633,,330.0,2012 -Transit,1,4.617101,,255.0,2012 -Transit,1,2.838971,,455.0,2012 -Transit,1,5.01718,,300.0,2012 -Transit,1,7.919585,,125.0,2012 -Transit,1,4.3050011,,400.0,2012 -Transit,1,3.8559,,480.0,2012 -Transit,1,4.411953,,160.0,2012 -Transit,1,4.37809,,330.0,2012 -Transit,1,1.5732918,,350.0,2012 -Transit,1,2.3114243,,310.0,2013 -Transit,1,4.086052,,380.0,2012 -Transit,1,4.61442,,225.0,2012 -Transit,1,2.9036747,,345.0,2012 -Transit,1,2.2167421,,340.0,2012 -Transit,1,2.484193,,260.0,2013 -Transit,1,1.3600309,,93.0,2012 -Transit,1,2.17517632,,550.0,2012 -Transit,1,3.6623866,,240.0,2012 -Transit,1,3.0678504,,60.0,2012 -Transit,1,0.925542,,470.0,2014 -Imaging,1,,,19.2,2011 -Transit,1,3.352057,,3200.0,2012 -Imaging,1,,,10.1,2012 -Transit,1,3.94150685,,172.0,2006 -Transit,1,2.615864,,148.0,2007 -Transit,1,3.1915239,,174.0,2007 -Transit,1,4.1250828,,293.0,2008 -Transit,1,4.187757,,260.0,2008 diff --git a/tensor/examples/grids/grids.go b/tensor/examples/grids/grids.go deleted file mode 100644 index f56658e1d6..0000000000 --- a/tensor/examples/grids/grids.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2024, Cogent Core. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "embed" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/core" - "cogentcore.org/core/tensor/table" - "cogentcore.org/core/tensor/tensorcore" -) - -//go:embed *.tsv -var tsv embed.FS - -func main() { - pats := table.NewTable("pats") - pats.SetMetaData("name", "TrainPats") - pats.SetMetaData("desc", "Training patterns") - // todo: meta data for grid size - errors.Log(pats.OpenFS(tsv, "random_5x5_25.tsv", table.Tab)) - - b := core.NewBody("grids") - - tv := core.NewTabs(b) - - // nt, _ := tv.NewTab("First") - nt, _ := tv.NewTab("Patterns") - etv := tensorcore.NewTable(nt).SetTable(pats) - b.AddTopBar(func(bar *core.Frame) { - core.NewToolbar(bar).Maker(etv.MakeToolbar) - }) - - b.RunMainWindow() -} diff --git a/tensor/examples/grids/random_5x5_25.tsv b/tensor/examples/grids/random_5x5_25.tsv deleted file mode 100644 index 95a685223a..0000000000 --- a/tensor/examples/grids/random_5x5_25.tsv +++ /dev/null @@ -1,26 +0,0 @@ -_H: $Name %Input[2:0,0]<2:5,5> %Input[2:1,0] %Input[2:2,0] %Input[2:3,0] %Input[2:4,0] %Input[2:0,1] %Input[2:1,1] %Input[2:2,1] %Input[2:3,1] %Input[2:4,1] %Input[2:0,2] %Input[2:1,2] %Input[2:2,2] %Input[2:3,2] %Input[2:4,2] %Input[2:0,3] %Input[2:1,3] %Input[2:2,3] %Input[2:3,3] %Input[2:4,3] %Input[2:0,4] %Input[2:1,4] %Input[2:2,4] %Input[2:3,4] %Input[2:4,4] %Output[2:0,0]<2:5,5> %Output[2:1,0] %Output[2:2,0] %Output[2:3,0] %Output[2:4,0] %Output[2:0,1] %Output[2:1,1] %Output[2:2,1] %Output[2:3,1] %Output[2:4,1] %Output[2:0,2] %Output[2:1,2] %Output[2:2,2] %Output[2:3,2] %Output[2:4,2] %Output[2:0,3] %Output[2:1,3] %Output[2:2,3] %Output[2:3,3] %Output[2:4,3] %Output[2:0,4] %Output[2:1,4] %Output[2:2,4] %Output[2:3,4] %Output[2:4,4] -_D: "evt_0" 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 0 0 1 0 0 0 0 0 1 0 0 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 -_D: "evt_1" 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 0 0 1 0 1 0 1 0 0 0 0 -_D: "evt_2" 0 0 1 0 0 0 0 0 0 1 0 0 1 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 1 0 1 0 0 1 0 0 1 0 0 0 0 0 0 -_D: "evt_3" 1 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 1 0 0 1 0 0 0 0 1 0 1 0 0 0 0 0 1 0 1 0 0 1 0 1 0 0 0 0 0 0 0 0 0 -_D: "evt_4" 0 0 1 0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1 0 1 0 0 1 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 -_D: "evt_5" 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 1 0 1 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 1 -_D: "evt_6" 0 1 0 0 0 1 0 1 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 1 0 0 1 0 0 0 0 0 0 1 1 0 0 -_D: "evt_7" 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 0 1 1 0 0 1 1 1 1 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -_D: "evt_8" 0 0 0 0 0 1 0 0 0 1 0 1 0 0 1 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 1 1 0 1 0 0 0 1 0 -_D: "evt_9" 0 0 0 0 0 1 0 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0 1 0 1 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 1 -_D: "evt_10" 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 1 1 1 0 0 -_D: "evt_11" 0 0 0 1 0 0 0 1 0 0 0 1 0 1 0 0 0 0 0 1 0 1 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 1 1 0 0 1 0 0 0 1 0 -_D: "evt_12" 0 0 0 0 0 0 1 0 1 1 0 0 1 0 0 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 1 0 0 0 0 1 0 0 0 1 0 1 0 0 -_D: "evt_13" 1 0 0 0 0 0 1 0 1 0 0 1 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 1 1 0 0 1 0 0 0 1 0 1 0 0 0 0 0 0 0 1 0 -_D: "evt_14" 0 1 0 0 0 1 0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 0 0 1 
-_D: "evt_15" 0 0 1 1 0 0 0 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1 1 1 0 0 0 1 0 0 0 0 0 1 0 0 -_D: "evt_16" 0 0 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 1 0 0 1 0 0 1 0 1 0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 -_D: "evt_17" 0 1 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 1 1 0 0 0 0 0 0 1 1 0 0 0 0 0 1 1 0 1 0 0 0 0 1 0 0 0 0 0 0 0 -_D: "evt_18" 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 1 0 0 1 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 1 -_D: "evt_19" 0 0 0 0 1 0 0 1 0 0 0 1 0 0 1 0 1 0 0 1 0 0 0 0 0 0 0 1 0 0 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 1 0 0 -_D: "evt_20" 0 0 0 0 0 0 0 1 0 1 1 0 1 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 1 0 0 1 0 0 1 0 0 1 0 -_D: "evt_21" 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 1 1 0 0 1 0 0 0 1 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 -_D: "evt_22" 1 0 0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 1 0 0 1 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 1 0 0 -_D: "evt_23" 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 1 1 1 0 0 0 1 0 1 1 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 -_D: "evt_24" 0 0 0 1 1 0 0 0 0 0 1 0 0 1 1 0 0 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 1 0 diff --git a/tensor/io.go b/tensor/io.go deleted file mode 100644 index 00b5548fd2..0000000000 --- a/tensor/io.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tensor - -import ( - "encoding/csv" - "io" - "log" - "os" - "strconv" - - "cogentcore.org/core/core" -) - -// SaveCSV writes a tensor to a comma-separated-values (CSV) file -// (where comma = any delimiter, specified in the delim arg). -// Outer-most dims are rows in the file, and inner-most is column -- -// Reading just grabs all values and doesn't care about shape. -func SaveCSV(tsr Tensor, filename core.Filename, delim rune) error { - fp, err := os.Create(string(filename)) - defer fp.Close() - if err != nil { - log.Println(err) - return err - } - WriteCSV(tsr, fp, delim) - return nil -} - -// OpenCSV reads a tensor from a comma-separated-values (CSV) file -// (where comma = any delimiter, specified in the delim arg), -// using the Go standard encoding/csv reader conforming -// to the official CSV standard. -// Reads all values and assigns as many as fit. -func OpenCSV(tsr Tensor, filename core.Filename, delim rune) error { - fp, err := os.Open(string(filename)) - defer fp.Close() - if err != nil { - log.Println(err) - return err - } - return ReadCSV(tsr, fp, delim) -} - -////////////////////////////////////////////////////////////////////////// -// WriteCSV - -// WriteCSV writes a tensor to a comma-separated-values (CSV) file -// (where comma = any delimiter, specified in the delim arg). -// Outer-most dims are rows in the file, and inner-most is column -- -// Reading just grabs all values and doesn't care about shape. 
-func WriteCSV(tsr Tensor, w io.Writer, delim rune) error { - prec := -1 - if ps, ok := tsr.MetaData("precision"); ok { - prec, _ = strconv.Atoi(ps) - } - cw := csv.NewWriter(w) - if delim != 0 { - cw.Comma = delim - } - nrow := tsr.DimSize(0) - nin := tsr.Len() / nrow - rec := make([]string, nin) - str := tsr.IsString() - for ri := 0; ri < nrow; ri++ { - for ci := 0; ci < nin; ci++ { - idx := ri*nin + ci - if str { - rec[ci] = tsr.String1D(idx) - } else { - rec[ci] = strconv.FormatFloat(tsr.Float1D(idx), 'g', prec, 64) - } - } - err := cw.Write(rec) - if err != nil { - log.Println(err) - return err - } - } - cw.Flush() - return nil -} - -// ReadCSV reads a tensor from a comma-separated-values (CSV) file -// (where comma = any delimiter, specified in the delim arg), -// using the Go standard encoding/csv reader conforming -// to the official CSV standard. -// Reads all values and assigns as many as fit. -func ReadCSV(tsr Tensor, r io.Reader, delim rune) error { - cr := csv.NewReader(r) - if delim != 0 { - cr.Comma = delim - } - rec, err := cr.ReadAll() // todo: lazy, avoid resizing - if err != nil || len(rec) == 0 { - return err - } - rows := len(rec) - cols := len(rec[0]) - sz := tsr.Len() - idx := 0 - for ri := 0; ri < rows; ri++ { - for ci := 0; ci < cols; ci++ { - str := rec[ri][ci] - tsr.SetString1D(idx, str) - idx++ - if idx >= sz { - goto done - } - } - } -done: - return nil -} diff --git a/tensor/number.go b/tensor/number.go deleted file mode 100644 index 91a6adb142..0000000000 --- a/tensor/number.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tensor - -import ( - "fmt" - "log" - "math" - "strconv" - "strings" - - "cogentcore.org/core/base/num" - "cogentcore.org/core/base/slicesx" - "gonum.org/v1/gonum/mat" -) - -// Number is a tensor of numerical values -type Number[T num.Number] struct { - Base[T] -} - -// Float64 is an alias for Number[float64]. -type Float64 = Number[float64] - -// Float32 is an alias for Number[float32]. -type Float32 = Number[float32] - -// Int is an alias for Number[int]. -type Int = Number[int] - -// Int32 is an alias for Number[int32]. -type Int32 = Number[int32] - -// Byte is an alias for Number[byte]. -type Byte = Number[byte] - -// NewFloat32 returns a new Float32 tensor -// with the given sizes per dimension (shape), and optional dimension names. -func NewFloat32(sizes []int, names ...string) *Float32 { - return New[float32](sizes, names...).(*Float32) -} - -// NewFloat64 returns a new Float64 tensor -// with the given sizes per dimension (shape), and optional dimension names. -func NewFloat64(sizes []int, names ...string) *Float64 { - return New[float64](sizes, names...).(*Float64) -} - -// NewInt returns a new Int tensor -// with the given sizes per dimension (shape), and optional dimension names. -func NewInt(sizes []int, names ...string) *Int { - return New[float64](sizes, names...).(*Int) -} - -// NewInt32 returns a new Int32 tensor -// with the given sizes per dimension (shape), and optional dimension names. -func NewInt32(sizes []int, names ...string) *Int32 { - return New[float64](sizes, names...).(*Int32) -} - -// NewByte returns a new Byte tensor -// with the given sizes per dimension (shape), and optional dimension names. 
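Number[T] above is one generic implementation shared by Float64, Float32, Int, Int32, and Byte: a flat Values slice of the concrete element type, with float64 as the interchange type for the generic accessors. A reduced standalone sketch of that design; the local number constraint stands in for num.Number and the grid type is illustrative only:

package main

import "fmt"

// number is a simplified numeric constraint covering a few element types.
type number interface {
	~float32 | ~float64 | ~int | ~int32 | ~byte
}

// grid is a minimal flat-backed, generic value store in the spirit of
// Number[T]: one []T holds all cells, and float64 is the interchange type.
type grid[T number] struct {
	values []T
}

func newGrid[T number](n int) *grid[T] { return &grid[T]{values: make([]T, n)} }

// SetFloat1D converts the incoming float64 to the element type T.
func (g *grid[T]) SetFloat1D(i int, v float64) { g.values[i] = T(v) }

// Float1D converts the stored element back to float64.
func (g *grid[T]) Float1D(i int) float64 { return float64(g.values[i]) }

func main() {
	f := newGrid[float32](4)
	f.SetFloat1D(0, 3.14)
	b := newGrid[byte](4)
	b.SetFloat1D(0, 3.14) // conversion truncates to 3 for an integer element type
	fmt.Println(f.Float1D(0), b.Float1D(0))
}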
-func NewByte(sizes []int, names ...string) *Byte { - return New[float64](sizes, names...).(*Byte) -} - -// NewNumber returns a new n-dimensional tensor of numerical values -// with the given sizes per dimension (shape), and optional dimension names. -func NewNumber[T num.Number](sizes []int, names ...string) *Number[T] { - tsr := &Number[T]{} - tsr.SetShape(sizes, names...) - tsr.Values = make([]T, tsr.Len()) - return tsr -} - -// NewNumberShape returns a new n-dimensional tensor of numerical values -// using given shape. -func NewNumberShape[T num.Number](shape *Shape) *Number[T] { - tsr := &Number[T]{} - tsr.Shp.CopyShape(shape) - tsr.Values = make([]T, tsr.Len()) - return tsr -} - -func (tsr *Number[T]) IsString() bool { - return false -} - -func (tsr *Number[T]) AddScalar(i []int, val float64) float64 { - j := tsr.Shp.Offset(i) - tsr.Values[j] += T(val) - return float64(tsr.Values[j]) -} - -func (tsr *Number[T]) MulScalar(i []int, val float64) float64 { - j := tsr.Shp.Offset(i) - tsr.Values[j] *= T(val) - return float64(tsr.Values[j]) -} - -func (tsr *Number[T]) SetString(i []int, val string) { - if fv, err := strconv.ParseFloat(val, 64); err == nil { - j := tsr.Shp.Offset(i) - tsr.Values[j] = T(fv) - } -} - -func (tsr Number[T]) SetString1D(off int, val string) { - if fv, err := strconv.ParseFloat(val, 64); err == nil { - tsr.Values[off] = T(fv) - } -} -func (tsr *Number[T]) SetStringRowCell(row, cell int, val string) { - if fv, err := strconv.ParseFloat(val, 64); err == nil { - _, sz := tsr.Shp.RowCellSize() - tsr.Values[row*sz+cell] = T(fv) - } -} - -// String satisfies the fmt.Stringer interface for string of tensor data -func (tsr *Number[T]) String() string { - str := tsr.Label() - sz := len(tsr.Values) - if sz > 1000 { - return str - } - var b strings.Builder - b.WriteString(str) - b.WriteString("\n") - oddRow := true - rows, cols, _, _ := Projection2DShape(&tsr.Shp, oddRow) - for r := 0; r < rows; r++ { - rc, _ := Projection2DCoords(&tsr.Shp, oddRow, r, 0) - b.WriteString(fmt.Sprintf("%v: ", rc)) - for c := 0; c < cols; c++ { - vl := Projection2DValue(tsr, oddRow, r, c) - b.WriteString(fmt.Sprintf("%7g ", vl)) - } - b.WriteString("\n") - } - return b.String() -} - -func (tsr *Number[T]) Float(i []int) float64 { - j := tsr.Shp.Offset(i) - return float64(tsr.Values[j]) -} - -func (tsr *Number[T]) SetFloat(i []int, val float64) { - j := tsr.Shp.Offset(i) - tsr.Values[j] = T(val) -} - -func (tsr *Number[T]) Float1D(i int) float64 { - return float64(tsr.Values[i]) -} - -func (tsr *Number[T]) SetFloat1D(i int, val float64) { - tsr.Values[i] = T(val) -} - -func (tsr *Number[T]) FloatRowCell(row, cell int) float64 { - _, sz := tsr.Shp.RowCellSize() - i := row*sz + cell - return float64(tsr.Values[i]) -} - -func (tsr *Number[T]) SetFloatRowCell(row, cell int, val float64) { - _, sz := tsr.Shp.RowCellSize() - tsr.Values[row*sz+cell] = T(val) -} - -// Floats sets []float64 slice of all elements in the tensor -// (length is ensured to be sufficient). -// This can be used for all of the gonum/floats methods -// for basic math, gonum/stats, etc. -func (tsr *Number[T]) Floats(flt *[]float64) { - *flt = slicesx.SetLength(*flt, len(tsr.Values)) - switch vals := any(tsr.Values).(type) { - case []float64: - copy(*flt, vals) - default: - for i, v := range tsr.Values { - (*flt)[i] = float64(v) - } - } -} - -// SetFloats sets tensor values from a []float64 slice (copies values). 
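Floats and SetFloats above use a type switch so that the []float64 case is a straight copy while every other element type falls back to per-element conversion. The same two-path idea in a standalone helper (toFloats is illustrative, not part of the tensor API):

package main

import "fmt"

// toFloats converts a numeric slice to []float64, copying directly when the
// source is already []float64 and converting element-wise otherwise, the same
// fast-path/slow-path split used by Floats and SetFloats above.
func toFloats[T float32 | float64 | int](src []T) []float64 {
	out := make([]float64, len(src))
	switch vals := any(src).(type) {
	case []float64:
		copy(out, vals) // no per-element conversion needed
	default:
		for i, v := range src {
			out[i] = float64(v)
		}
	}
	return out
}

func main() {
	fmt.Println(toFloats([]float64{1.5, 2.5})) // copy path
	fmt.Println(toFloats([]int{1, 2, 3}))      // conversion path
}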
-func (tsr *Number[T]) SetFloats(flt []float64) { - switch vals := any(tsr.Values).(type) { - case []float64: - copy(vals, flt) - default: - for i, v := range flt { - tsr.Values[i] = T(v) - } - } -} - -// At is the gonum/mat.Matrix interface method for returning 2D matrix element at given -// row, column index. Assumes Row-major ordering and logs an error if NumDims < 2. -func (tsr *Number[T]) At(i, j int) float64 { - nd := tsr.NumDims() - if nd < 2 { - log.Println("tensor Dims gonum Matrix call made on Tensor with dims < 2") - return 0 - } else if nd == 2 { - return tsr.Float([]int{i, j}) - } else { - ix := make([]int, nd) - ix[nd-2] = i - ix[nd-1] = j - return tsr.Float(ix) - } -} - -// T is the gonum/mat.Matrix transpose method. -// It performs an implicit transpose by returning the receiver inside a Transpose. -func (tsr *Number[T]) T() mat.Matrix { - return mat.Transpose{tsr} -} - -// Range returns the min, max (and associated indexes, -1 = no values) for the tensor. -// This is needed for display and is thus in the core api in optimized form -// Other math operations can be done using gonum/floats package. -func (tsr *Number[T]) Range() (min, max float64, minIndex, maxIndex int) { - minIndex = -1 - maxIndex = -1 - for j, vl := range tsr.Values { - fv := float64(vl) - if math.IsNaN(fv) { - continue - } - if fv < min || minIndex < 0 { - min = fv - minIndex = j - } - if fv > max || maxIndex < 0 { - max = fv - maxIndex = j - } - } - return -} - -// SetZeros is simple convenience function initialize all values to 0 -func (tsr *Number[T]) SetZeros() { - for j := range tsr.Values { - tsr.Values[j] = 0 - } -} - -// Clone clones this tensor, creating a duplicate copy of itself with its -// own separate memory representation of all the values, and returns -// that as a Tensor (which can be converted into the known type as needed). -func (tsr *Number[T]) Clone() Tensor { - csr := NewNumberShape[T](&tsr.Shp) - copy(csr.Values, tsr.Values) - return csr -} - -// CopyFrom copies all avail values from other tensor into this tensor, with an -// optimized implementation if the other tensor is of the same type, and -// otherwise it goes through appropriate standard type. -func (tsr *Number[T]) CopyFrom(frm Tensor) { - if fsm, ok := frm.(*Number[T]); ok { - copy(tsr.Values, fsm.Values) - return - } - sz := min(len(tsr.Values), frm.Len()) - for i := 0; i < sz; i++ { - tsr.Values[i] = T(frm.Float1D(i)) - } -} - -// CopyShapeFrom copies just the shape from given source tensor -// calling SetShape with the shape params from source (see for more docs). -func (tsr *Number[T]) CopyShapeFrom(frm Tensor) { - tsr.SetShape(frm.Shape().Sizes, frm.Shape().Names...) -} - -// CopyCellsFrom copies given range of values from other tensor into this tensor, -// using flat 1D indexes: to = starting index in this Tensor to start copying into, -// start = starting index on from Tensor to start copying from, and n = number of -// values to copy. Uses an optimized implementation if the other tensor is -// of the same type, and otherwise it goes through appropriate standard type. -func (tsr *Number[T]) CopyCellsFrom(frm Tensor, to, start, n int) { - if fsm, ok := frm.(*Number[T]); ok { - for i := 0; i < n; i++ { - tsr.Values[to+i] = fsm.Values[start+i] - } - return - } - for i := 0; i < n; i++ { - tsr.Values[to+i] = T(frm.Float1D(start + i)) - } -} - -// SubSpace returns a new tensor with innermost subspace at given -// offset(s) in outermost dimension(s) (len(offs) < NumDims). 
-// The new tensor points to the values of the this tensor (i.e., modifications -// will affect both), as its Values slice is a view onto the original (which -// is why only inner-most contiguous supsaces are supported). -// Use Clone() method to separate the two. -func (tsr *Number[T]) SubSpace(offs []int) Tensor { - b := tsr.subSpaceImpl(offs) - rt := &Number[T]{Base: *b} - return rt -} diff --git a/tensor/projection2d.go b/tensor/projection2d.go deleted file mode 100644 index 592cecc4e1..0000000000 --- a/tensor/projection2d.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tensor - -const ( - // OddRow is for oddRow arguments to Projection2D functions, - // specifies that the odd dimension goes along the row. - OddRow = true - - // OddColumn is for oddRow arguments to Projection2D functions, - // specifies that the odd dimension goes along the column. - OddColumn = false -) - -// Projection2DShape returns the size of a 2D projection of the given tensor Shape, -// collapsing higher dimensions down to 2D (and 1D up to 2D). -// For any odd number of dimensions, the remaining outer-most dimension -// can either be multipliexed across the row or column, given the oddRow arg. -// Even multiples of inner-most dimensions are assumed to be row, then column. -// rowEx returns the number of "extra" (higher dimensional) rows -// and colEx returns the number of extra cols -func Projection2DShape(shp *Shape, oddRow bool) (rows, cols, rowEx, colEx int) { - if shp.Len() == 0 { - return 1, 1, 0, 0 - } - nd := shp.NumDims() - switch nd { - case 1: - if oddRow { - return shp.DimSize(0), 1, 0, 0 - } else { - return 1, shp.DimSize(0), 0, 0 - } - case 2: - return shp.DimSize(0), shp.DimSize(1), 0, 0 - case 3: - if oddRow { - return shp.DimSize(0) * shp.DimSize(1), shp.DimSize(2), shp.DimSize(0), 0 - } else { - return shp.DimSize(1), shp.DimSize(0) * shp.DimSize(2), 0, shp.DimSize(0) - } - case 4: - return shp.DimSize(0) * shp.DimSize(2), shp.DimSize(1) * shp.DimSize(3), shp.DimSize(0), shp.DimSize(1) - case 5: - if oddRow { - return shp.DimSize(0) * shp.DimSize(1) * shp.DimSize(3), shp.DimSize(2) * shp.DimSize(4), shp.DimSize(0) * shp.DimSize(1), 0 - } else { - return shp.DimSize(1) * shp.DimSize(3), shp.DimSize(0) * shp.DimSize(2) * shp.DimSize(4), 0, shp.DimSize(0) * shp.DimSize(1) - } - } - return 1, 1, 0, 0 -} - -// Projection2DIndex returns the flat 1D index for given row, col coords for a 2D projection -// of the given tensor shape, collapsing higher dimensions down to 2D (and 1D up to 2D). -// For any odd number of dimensions, the remaining outer-most dimension -// can either be multipliexed across the row or column, given the oddRow arg. -// Even multiples of inner-most dimensions are assumed to be row, then column. 
-func Projection2DIndex(shp *Shape, oddRow bool, row, col int) int { - nd := shp.NumDims() - switch nd { - case 1: - if oddRow { - return row - } else { - return col - } - case 2: - return shp.Offset([]int{row, col}) - case 3: - if oddRow { - ny := shp.DimSize(1) - yy := row / ny - y := row % ny - return shp.Offset([]int{yy, y, col}) - } else { - nx := shp.DimSize(2) - xx := col / nx - x := col % nx - return shp.Offset([]int{xx, row, x}) - } - case 4: - ny := shp.DimSize(2) - yy := row / ny - y := row % ny - nx := shp.DimSize(3) - xx := col / nx - x := col % nx - return shp.Offset([]int{yy, xx, y, x}) - case 5: - // todo: oddRows version! - nyy := shp.DimSize(1) - ny := shp.DimSize(3) - yyy := row / (nyy * ny) - yy := row % (nyy * ny) - y := yy % ny - yy = yy / ny - nx := shp.DimSize(4) - xx := col / nx - x := col % nx - return shp.Offset([]int{yyy, yy, xx, y, x}) - } - return 0 -} - -// Projection2DCoords returns the corresponding full-dimensional coordinates -// that go into the given row, col coords for a 2D projection of the given tensor, -// collapsing higher dimensions down to 2D (and 1D up to 2D). -func Projection2DCoords(shp *Shape, oddRow bool, row, col int) (rowCoords, colCoords []int) { - idx := Projection2DIndex(shp, oddRow, row, col) - dims := shp.Index(idx) - nd := shp.NumDims() - switch nd { - case 1: - if oddRow { - return dims, []int{0} - } else { - return []int{0}, dims - } - case 2: - return dims[:1], dims[1:] - case 3: - if oddRow { - return dims[:2], dims[2:] - } else { - return dims[:1], dims[1:] - } - case 4: - return []int{dims[0], dims[2]}, []int{dims[1], dims[3]} - case 5: - if oddRow { - return []int{dims[0], dims[1], dims[3]}, []int{dims[2], dims[4]} - } else { - return []int{dims[1], dims[3]}, []int{dims[0], dims[2], dims[4]} - } - } - return nil, nil -} - -// Projection2DValue returns the float64 value at given row, col coords for a 2D projection -// of the given tensor, collapsing higher dimensions down to 2D (and 1D up to 2D). -// For any odd number of dimensions, the remaining outer-most dimension -// can either be multipliexed across the row or column, given the oddRow arg. -// Even multiples of inner-most dimensions are assumed to be row, then column. -func Projection2DValue(tsr Tensor, oddRow bool, row, col int) float64 { - idx := Projection2DIndex(tsr.Shape(), oddRow, row, col) - return tsr.Float1D(idx) -} - -// Projection2DString returns the string value at given row, col coords for a 2D projection -// of the given tensor, collapsing higher dimensions down to 2D (and 1D up to 2D). -// For any odd number of dimensions, the remaining outer-most dimension -// can either be multipliexed across the row or column, given the oddRow arg. -// Even multiples of inner-most dimensions are assumed to be row, then column. -func Projection2DString(tsr Tensor, oddRow bool, row, col int) string { - idx := Projection2DIndex(tsr.Shape(), oddRow, row, col) - return tsr.String1D(idx) -} - -// Projection2DSet sets a float64 value at given row, col coords for a 2D projection -// of the given tensor, collapsing higher dimensions down to 2D (and 1D up to 2D). -// For any odd number of dimensions, the remaining outer-most dimension -// can either be multipliexed across the row or column, given the oddRow arg. -// Even multiples of inner-most dimensions are assumed to be row, then column. 
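An illustrative sketch of the removed Projection2D helpers, which render a tensor of any dimensionality onto a 2D grid (this is what the Number String method earlier in the diff does internally); the tensor here is hypothetical:

tsr := tensor.NewNumber[float64]([]int{5, 4, 3}) // 3D: odd outer dimension folds into rows
rows, cols, _, _ := tensor.Projection2DShape(tsr.Shape(), tensor.OddRow)
for r := 0; r < rows; r++ {
	for c := 0; c < cols; c++ {
		fmt.Printf("%7g ", tensor.Projection2DValue(tsr, tensor.OddRow, r, c))
	}
	fmt.Println()
}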
-func Projection2DSet(tsr Tensor, oddRow bool, row, col int, val float64) { - idx := Projection2DIndex(tsr.Shape(), oddRow, row, col) - tsr.SetFloat1D(idx, val) -} - -// Projection2DSetString sets a string value at given row, col coords for a 2D projection -// of the given tensor, collapsing higher dimensions down to 2D (and 1D up to 2D). -// For any odd number of dimensions, the remaining outer-most dimension -// can either be multipliexed across the row or column, given the oddRow arg. -// Even multiples of inner-most dimensions are assumed to be row, then column. -func Projection2DSetString(tsr Tensor, oddRow bool, row, col int, val string) { - idx := Projection2DIndex(tsr.Shape(), oddRow, row, col) - tsr.SetString1D(idx, val) -} diff --git a/tensor/shape.go b/tensor/shape.go deleted file mode 100644 index e31f7fe553..0000000000 --- a/tensor/shape.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tensor - -import ( - "fmt" - "slices" -) - -// Shape manages a tensor's shape information, including strides and dimension names -// and can compute the flat index into an underlying 1D data storage array based on an -// n-dimensional index (and vice-versa). -// Per C / Go / Python conventions, indexes are Row-Major, ordered from -// outer to inner left-to-right, so the inner-most is right-most. -type Shape struct { - - // size per dimension - Sizes []int - - // offsets for each dimension - Strides []int `display:"-"` - - // names of each dimension - Names []string `display:"-"` -} - -// NewShape returns a new shape with given sizes and optional dimension names. -// RowMajor ordering is used by default. -func NewShape(sizes []int, names ...string) *Shape { - sh := &Shape{} - sh.SetShape(sizes, names...) - return sh -} - -// SetShape sets the shape size and optional names -// RowMajor ordering is used by default. -func (sh *Shape) SetShape(sizes []int, names ...string) { - sh.Sizes = slices.Clone(sizes) - sh.Strides = RowMajorStrides(sizes) - sh.Names = make([]string, len(sh.Sizes)) - if len(names) == len(sizes) { - copy(sh.Names, names) - } -} - -// CopyShape copies the shape parameters from another Shape struct. -// copies the data so it is not accidentally subject to updates. -func (sh *Shape) CopyShape(cp *Shape) { - sh.Sizes = slices.Clone(cp.Sizes) - sh.Strides = slices.Clone(cp.Strides) - sh.Names = slices.Clone(cp.Names) -} - -// Len returns the total length of elements in the tensor -// (i.e., the product of the shape sizes) -func (sh *Shape) Len() int { - if len(sh.Sizes) == 0 { - return 0 - } - o := int(1) - for _, v := range sh.Sizes { - o *= v - } - return int(o) -} - -// NumDims returns the total number of dimensions. -func (sh *Shape) NumDims() int { return len(sh.Sizes) } - -// DimSize returns the size of given dimension. -func (sh *Shape) DimSize(i int) int { return sh.Sizes[i] } - -// DimName returns the name of given dimension. -func (sh *Shape) DimName(i int) string { return sh.Names[i] } - -// DimByName returns the index of the given dimension name. -// returns -1 if not found. -func (sh *Shape) DimByName(name string) int { - for i, nm := range sh.Names { - if nm == name { - return i - } - } - return -1 -} - -// DimSizeByName returns the size of given dimension, specified by name. -// will crash if name not found. 
-func (sh *Shape) DimSizeByName(name string) int { - return sh.DimSize(sh.DimByName(name)) -} - -// IndexIsValid() returns true if given index is valid (within ranges for all dimensions) -func (sh *Shape) IndexIsValid(idx []int) bool { - if len(idx) != sh.NumDims() { - return false - } - for i, v := range sh.Sizes { - if idx[i] < 0 || idx[i] >= v { - return false - } - } - return true -} - -// IsEqual returns true if this shape is same as other (does not compare names) -func (sh *Shape) IsEqual(oth *Shape) bool { - if !EqualInts(sh.Sizes, oth.Sizes) { - return false - } - if !EqualInts(sh.Strides, oth.Strides) { - return false - } - return true -} - -// RowCellSize returns the size of the outer-most Row shape dimension, -// and the size of all the remaining inner dimensions (the "cell" size). -// Used for Tensors that are columns in a data table. -func (sh *Shape) RowCellSize() (rows, cells int) { - rows = sh.Sizes[0] - if len(sh.Sizes) == 1 { - cells = 1 - } else { - cells = sh.Len() / rows - } - return -} - -// Offset returns the "flat" 1D array index into an element at the given n-dimensional index. -// No checking is done on the length or size of the index values relative to the shape of the tensor. -func (sh *Shape) Offset(index []int) int { - var offset int - for i, v := range index { - offset += v * sh.Strides[i] - } - return offset -} - -// Index returns the n-dimensional index from a "flat" 1D array index. -func (sh *Shape) Index(offset int) []int { - nd := len(sh.Sizes) - index := make([]int, nd) - rem := offset - for i := nd - 1; i >= 0; i-- { - s := sh.Sizes[i] - iv := rem % s - rem /= s - index[i] = iv - } - return index -} - -// String satisfies the fmt.Stringer interface -func (sh *Shape) String() string { - str := "[" - for i := range sh.Sizes { - nm := sh.Names[i] - if nm != "" { - str += nm + ": " - } - str += fmt.Sprintf("%d", sh.Sizes[i]) - if i < len(sh.Sizes)-1 { - str += ", " - } - } - str += "]" - return str -} - -// RowMajorStrides returns strides for sizes where the first dimension is outer-most -// and subsequent dimensions are progressively inner. -func RowMajorStrides(sizes []int) []int { - rem := int(1) - for _, v := range sizes { - rem *= v - } - - if rem == 0 { - strides := make([]int, len(sizes)) - rem := int(1) - for i := range strides { - strides[i] = rem - } - return strides - } - - strides := make([]int, len(sizes)) - for i, v := range sizes { - rem /= v - strides[i] = rem - } - return strides -} - -// ColMajorStrides returns strides for sizes where the first dimension is inner-most -// and subsequent dimensions are progressively outer -func ColMajorStrides(sizes []int) []int { - total := int(1) - for _, v := range sizes { - if v == 0 { - strides := make([]int, len(sizes)) - for i := range strides { - strides[i] = total - } - return strides - } - } - - strides := make([]int, len(sizes)) - for i, v := range sizes { - strides[i] = total - total *= v - } - return strides -} - -// EqualInts compares two int slices and returns true if they are equal -func EqualInts(a, b []int) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} - -// AddShapes returns a new shape by adding two shapes one after the other. 
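A short sketch of the removed Shape API, showing row-major strides, flat offsets, and the row/cell split used by table columns (the printed values follow directly from the code above):

sh := tensor.NewShape([]int{3, 4, 5})
fmt.Println(sh.Len())                  // 60
fmt.Println(sh.Strides)                // [20 5 1] (row-major)
fmt.Println(sh.Offset([]int{1, 2, 3})) // 1*20 + 2*5 + 3 = 33
fmt.Println(sh.Index(33))              // [1 2 3]
rows, cells := sh.RowCellSize()
fmt.Println(rows, cells)               // 3 20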
-func AddShapes(shape1, shape2 *Shape) *Shape { - sh1 := shape1.Sizes - sh2 := shape2.Sizes - nsh := make([]int, len(sh1)+len(sh2)) - copy(nsh, sh1) - copy(nsh[len(sh1):], sh2) - nms := make([]string, len(sh1)+len(sh2)) - copy(nms, shape1.Names) - copy(nms[len(sh1):], shape2.Names) - return NewShape(nsh, nms...) -} diff --git a/tensor/stats/README.md b/tensor/stats/README.md deleted file mode 100644 index 9e40210158..0000000000 --- a/tensor/stats/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# stats - -There are several packages here for operating on vector, [tensor](../tensor), and [table](../table) data, for computing standard statistics and performing related computations, such as normalizing the data. - -* [clust](clust) implements agglomerative clustering of items based on [simat](simat) similarity matrix data. -* [convolve](convolve) convolves data (e.g., for smoothing). -* [glm](glm) fits a general linear model for one or more dependent variables as a function of one or more independent variables. This encompasses all forms of regression. -* [histogram](histogram) bins data into groups and reports the frequency of elements in the bins. -* [metric](metric) computes similarity / distance metrics for comparing two vectors -* [norm](norm) normalizes vector data -* [pca](pca) computes principal components analysis (PCA) or singular value decomposition (SVD) on correlation matricies, which is a widely-used way of reducing the dimensionality of high-dimensional data. -* [simat](simat) computes a similarity matrix for the [metric](metric) similarity of two vectors. -* [split](split) provides grouping and aggregation functions operating on `table.Table` data, e.g., like a "pivot table" in a spreadsheet. -* [stats](stats) provides a set of standard summary statistics on a range of different data types, including basic slices of floats, to tensor and table data. - diff --git a/tensor/stats/clust/README.md b/tensor/stats/clust/README.md deleted file mode 100644 index a11a82a5d9..0000000000 --- a/tensor/stats/clust/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# clust - -`clust` implements agglomerative clustering of items based on [simat](../simat) similarity matrix data. - -`GlomClust` is the main function, taking different `DistFunc` options for comparing distance between items. - - diff --git a/tensor/stats/clust/clust.go b/tensor/stats/clust/clust.go deleted file mode 100644 index 666990b250..0000000000 --- a/tensor/stats/clust/clust.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package clust - -//go:generate core generate - -import ( - "fmt" - "math" - "math/rand" - - "cogentcore.org/core/base/indent" - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/stats/simat" - "cogentcore.org/core/tensor/stats/stats" -) - -// Node is one node in the cluster -type Node struct { - - // index into original distance matrix -- only valid for for terminal leaves - Index int - - // distance for this node -- how far apart were all the kids from each other when this node was created -- is 0 for leaf nodes - Dist float64 - - // total aggregate distance from parents -- the X axis offset at which our cluster starts - ParDist float64 - - // y-axis value for this node -- if a parent, it is the average of its kids Y's, otherwise it counts down - Y float64 - - // child nodes under this one - Kids []*Node -} - -// IsLeaf returns true if node is a leaf of the tree with no kids -func (nn *Node) IsLeaf() bool { - return len(nn.Kids) == 0 -} - -// Sprint prints to string -func (nn *Node) Sprint(smat *simat.SimMat, depth int) string { - if nn.IsLeaf() { - return smat.Rows[nn.Index] + " " - } - sv := fmt.Sprintf("\n%v%v: ", indent.Tabs(depth), nn.Dist) - for _, kn := range nn.Kids { - sv += kn.Sprint(smat, depth+1) - } - return sv -} - -// Indexes collects all the indexes in this node -func (nn *Node) Indexes(ix []int, ctr *int) { - if nn.IsLeaf() { - ix[*ctr] = nn.Index - (*ctr)++ - } else { - for _, kn := range nn.Kids { - kn.Indexes(ix, ctr) - } - } -} - -// NewNode merges two nodes into a new node -func NewNode(na, nb *Node, dst float64) *Node { - nn := &Node{Dist: dst} - nn.Kids = []*Node{na, nb} - return nn -} - -// Glom implements basic agglomerative clustering, based on a raw similarity matrix as given. -// This calls GlomInit to initialize the root node with all of the leaves, and the calls -// GlomClust to do the iterative clustering process. If you want to start with pre-defined -// initial clusters, then call GlomClust with a root node so-initialized. -// The smat.Mat matrix must be an tensor.Float64. -func Glom(smat *simat.SimMat, dfunc DistFunc) *Node { - ntot := smat.Mat.DimSize(0) // number of leaves - root := GlomInit(ntot) - return GlomClust(root, smat, dfunc) -} - -// GlomStd implements basic agglomerative clustering, based on a raw similarity matrix as given. -// This calls GlomInit to initialize the root node with all of the leaves, and the calls -// GlomClust to do the iterative clustering process. If you want to start with pre-defined -// initial clusters, then call GlomClust with a root node so-initialized. -// The smat.Mat matrix must be an tensor.Float64. -// Std version uses std distance functions -func GlomStd(smat *simat.SimMat, std StdDists) *Node { - return Glom(smat, StdFunc(std)) -} - -// GlomInit returns a standard root node initialized with all of the leaves -func GlomInit(ntot int) *Node { - root := &Node{} - root.Kids = make([]*Node, ntot) - for i := 0; i < ntot; i++ { - root.Kids[i] = &Node{Index: i} - } - return root -} - -// GlomClust does the iterative agglomerative clustering, based on a raw similarity matrix as given, -// using a root node that has already been initialized with the starting clusters (all of the -// leaves by default, but could be anything if you want to start with predefined clusters). -// The smat.Mat matrix must be an tensor.Float64. 
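An illustrative end-to-end sketch of the removed clustering API, following the pattern in clust_test.go below; the data file and column names are taken from that test, and the usual imports (table, simat, metric, clust, fmt, log) are assumed:

dt := &table.Table{}
if err := dt.OpenCSV("testdata/faces.dat", table.Tab); err != nil {
	log.Fatal(err)
}
ix := table.NewIndexView(dt)
smat := &simat.SimMat{}
smat.TableColumn(ix, "Input", "Name", false, metric.Euclidean64)

root := clust.GlomStd(smat, clust.Avg) // average-linkage agglomerative clustering
fmt.Println(root.Sprint(smat, 0))      // indented text rendering of the tree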
-func GlomClust(root *Node, smat *simat.SimMat, dfunc DistFunc) *Node { - ntot := smat.Mat.DimSize(0) // number of leaves - smatf := smat.Mat.(*tensor.Float64).Values - maxd := stats.Max64(smatf) - // indexes in each group - aidx := make([]int, ntot) - bidx := make([]int, ntot) - for { - var ma, mb []int - mval := math.MaxFloat64 - for ai, ka := range root.Kids { - actr := 0 - ka.Indexes(aidx, &actr) - aix := aidx[0:actr] - for bi := 0; bi < ai; bi++ { - kb := root.Kids[bi] - bctr := 0 - kb.Indexes(bidx, &bctr) - bix := bidx[0:bctr] - dv := dfunc(aix, bix, ntot, maxd, smatf) - if dv < mval { - mval = dv - ma = []int{ai} - mb = []int{bi} - } else if dv == mval { // do all ties at same time - ma = append(ma, ai) - mb = append(mb, bi) - } - } - } - ni := 0 - if len(ma) > 1 { - ni = rand.Intn(len(ma)) - } - na := ma[ni] - nb := mb[ni] - // fmt.Printf("merging nodes at dist: %v: %v and %v\nA: %v\nB: %v\n", mval, na, nb, root.Kids[na].Sprint(smat, 0), root.Kids[nb].Sprint(smat, 0)) - nn := NewNode(root.Kids[na], root.Kids[nb], mval) - for i := len(root.Kids) - 1; i >= 0; i-- { - if i == na || i == nb { - root.Kids = append(root.Kids[:i], root.Kids[i+1:]...) - } - } - root.Kids = append(root.Kids, nn) - if len(root.Kids) == 1 { - break - } - } - return root -} diff --git a/tensor/stats/clust/clust_test.go b/tensor/stats/clust/clust_test.go deleted file mode 100644 index 858dc0dd6b..0000000000 --- a/tensor/stats/clust/clust_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package clust - -import ( - "testing" - - "cogentcore.org/core/base/tolassert" - "cogentcore.org/core/tensor/stats/metric" - "cogentcore.org/core/tensor/stats/simat" - "cogentcore.org/core/tensor/table" -) - -var clustres = ` -0: - 9.181170003996987: - 5.534356399283666: - 4.859933131085473: - 3.4641016151377544: Mark_sad Mark_happy - 3.4641016151377544: Zane_sad Zane_happy - 3.4641016151377544: Alberto_sad Alberto_happy - 5.111664626761644: - 4.640135790634417: - 4: Lisa_sad Lisa_happy - 3.4641016151377544: Betty_sad Betty_happy - 3.605551275463989: Wendy_sad Wendy_happy ` - -func TestClust(t *testing.T) { - dt := &table.Table{} - err := dt.OpenCSV("testdata/faces.dat", table.Tab) - if err != nil { - t.Error(err) - } - ix := table.NewIndexView(dt) - smat := &simat.SimMat{} - smat.TableColumn(ix, "Input", "Name", false, metric.Euclidean64) - - // fmt.Printf("%v\n", smat.Mat) - // cl := Glom(smat, MinDist) - cl := Glom(smat, AvgDist) - // s := cl.Sprint(smat, 0) - // fmt.Println(s) - - var dists []float64 - - var gather func(n *Node) - gather = func(n *Node) { - dists = append(dists, n.Dist) - for _, kn := range n.Kids { - gather(kn) - } - } - gather(cl) - - exdists := []float64{0, 9.181170003996987, 5.534356399283667, 4.859933131085473, 3.4641016151377544, 0, 0, 3.4641016151377544, 0, 0, 3.4641016151377544, 0, 0, 5.111664626761644, 4.640135790634417, 4, 0, 0, 3.4641016151377544, 0, 0, 3.605551275463989, 0, 0} - - tolassert.EqualTolSlice(t, exdists, dists, 1.0e-8) -} diff --git a/tensor/stats/clust/dist.go b/tensor/stats/clust/dist.go deleted file mode 100644 index a60acf62b9..0000000000 --- a/tensor/stats/clust/dist.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package clust - -import ( - "math" -) - -// DistFunc is a clustering distance function that evaluates aggregate distance -// between nodes, given the indexes of leaves in a and b clusters -// which are indexs into an ntot x ntot similarity (distance) matrix smat. -// maxd is the maximum distance value in the smat, which is needed by the -// ContrastDist function and perhaps others. -type DistFunc func(aix, bix []int, ntot int, maxd float64, smat []float64) float64 - -// MinDist is the minimum-distance or single-linkage weighting function for comparing -// two clusters a and b, given by their list of indexes. -// ntot is total number of nodes, and smat is the square similarity matrix [ntot x ntot]. -func MinDist(aix, bix []int, ntot int, maxd float64, smat []float64) float64 { - md := math.MaxFloat64 - for _, ai := range aix { - for _, bi := range bix { - d := smat[ai*ntot+bi] - if d < md { - md = d - } - } - } - return md -} - -// MaxDist is the maximum-distance or complete-linkage weighting function for comparing -// two clusters a and b, given by their list of indexes. -// ntot is total number of nodes, and smat is the square similarity matrix [ntot x ntot]. -func MaxDist(aix, bix []int, ntot int, maxd float64, smat []float64) float64 { - md := -math.MaxFloat64 - for _, ai := range aix { - for _, bi := range bix { - d := smat[ai*ntot+bi] - if d > md { - md = d - } - } - } - return md -} - -// AvgDist is the average-distance or average-linkage weighting function for comparing -// two clusters a and b, given by their list of indexes. -// ntot is total number of nodes, and smat is the square similarity matrix [ntot x ntot]. -func AvgDist(aix, bix []int, ntot int, maxd float64, smat []float64) float64 { - md := 0.0 - n := 0 - for _, ai := range aix { - for _, bi := range bix { - d := smat[ai*ntot+bi] - md += d - n++ - } - } - if n > 0 { - md /= float64(n) - } - return md -} - -// ContrastDist computes maxd + (average within distance - average between distance) -// for two clusters a and b, given by their list of indexes. -// avg between is average distance between all items in a & b versus all outside that. -// ntot is total number of nodes, and smat is the square similarity matrix [ntot x ntot]. -// maxd is the maximum distance and is needed to ensure distances are positive. -func ContrastDist(aix, bix []int, ntot int, maxd float64, smat []float64) float64 { - wd := AvgDist(aix, bix, ntot, maxd, smat) - nab := len(aix) + len(bix) - abix := append(aix, bix...) 
- abmap := make(map[int]struct{}, ntot-nab) - for _, ix := range abix { - abmap[ix] = struct{}{} - } - oix := make([]int, ntot-nab) - octr := 0 - for ix := 0; ix < ntot; ix++ { - if _, has := abmap[ix]; !has { - oix[octr] = ix - octr++ - } - } - bd := AvgDist(abix, oix, ntot, maxd, smat) - return maxd + (wd - bd) -} - -// StdDists are standard clustering distance functions -type StdDists int32 //enums:enum - -const ( - // Min is the minimum-distance or single-linkage weighting function - Min StdDists = iota - - // Max is the maximum-distance or complete-linkage weighting function - Max - - // Avg is the average-distance or average-linkage weighting function - Avg - - // Contrast computes maxd + (average within distance - average between distance) - Contrast -) - -// StdFunc returns a standard distance function as specified -func StdFunc(std StdDists) DistFunc { - switch std { - case Min: - return MinDist - case Max: - return MaxDist - case Avg: - return AvgDist - case Contrast: - return ContrastDist - } - return nil -} diff --git a/tensor/stats/clust/enumgen.go b/tensor/stats/clust/enumgen.go deleted file mode 100644 index 1cbbc383b5..0000000000 --- a/tensor/stats/clust/enumgen.go +++ /dev/null @@ -1,48 +0,0 @@ -// Code generated by "core generate"; DO NOT EDIT. - -package clust - -import ( - "cogentcore.org/core/enums" -) - -var _StdDistsValues = []StdDists{0, 1, 2, 3} - -// StdDistsN is the highest valid value for type StdDists, plus one. -const StdDistsN StdDists = 4 - -var _StdDistsValueMap = map[string]StdDists{`Min`: 0, `Max`: 1, `Avg`: 2, `Contrast`: 3} - -var _StdDistsDescMap = map[StdDists]string{0: `Min is the minimum-distance or single-linkage weighting function`, 1: `Max is the maximum-distance or complete-linkage weighting function`, 2: `Avg is the average-distance or average-linkage weighting function`, 3: `Contrast computes maxd + (average within distance - average between distance)`} - -var _StdDistsMap = map[StdDists]string{0: `Min`, 1: `Max`, 2: `Avg`, 3: `Contrast`} - -// String returns the string representation of this StdDists value. -func (i StdDists) String() string { return enums.String(i, _StdDistsMap) } - -// SetString sets the StdDists value from its string representation, -// and returns an error if the string is invalid. -func (i *StdDists) SetString(s string) error { - return enums.SetString(i, s, _StdDistsValueMap, "StdDists") -} - -// Int64 returns the StdDists value as an int64. -func (i StdDists) Int64() int64 { return int64(i) } - -// SetInt64 sets the StdDists value from an int64. -func (i *StdDists) SetInt64(in int64) { *i = StdDists(in) } - -// Desc returns the description of the StdDists value. -func (i StdDists) Desc() string { return enums.Desc(i, _StdDistsDescMap) } - -// StdDistsValues returns all possible values for the type StdDists. -func StdDistsValues() []StdDists { return _StdDistsValues } - -// Values returns all possible values for the type StdDists. -func (i StdDists) Values() []enums.Enum { return enums.Values(_StdDistsValues) } - -// MarshalText implements the [encoding.TextMarshaler] interface. -func (i StdDists) MarshalText() ([]byte, error) { return []byte(i.String()), nil } - -// UnmarshalText implements the [encoding.TextUnmarshaler] interface. 
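Because DistFunc is a plain function value, custom linkage rules can be passed to Glom directly; this hypothetical rule blends average and complete linkage, reusing the smat from the clustering sketch above:

blend := func(aix, bix []int, ntot int, maxd float64, smatf []float64) float64 {
	return 0.5*clust.AvgDist(aix, bix, ntot, maxd, smatf) +
		0.5*clust.MaxDist(aix, bix, ntot, maxd, smatf)
}
root := clust.Glom(smat, blend)

// The standard rules are also selectable by enum:
root2 := clust.Glom(smat, clust.StdFunc(clust.Contrast))
_, _ = root, root2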
-func (i *StdDists) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "StdDists") } diff --git a/tensor/stats/clust/plot.go b/tensor/stats/clust/plot.go deleted file mode 100644 index 51f9144926..0000000000 --- a/tensor/stats/clust/plot.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package clust - -import ( - "cogentcore.org/core/tensor/stats/simat" - "cogentcore.org/core/tensor/table" -) - -// Plot sets the rows of given data table to trace out lines with labels that -// will render cluster plot starting at root node when plotted with a standard plotting package. -// The lines double-back on themselves to form a continuous line to be plotted. -func Plot(pt *table.Table, root *Node, smat *simat.SimMat) { - pt.DeleteAll() - pt.AddFloat64Column("X") - pt.AddFloat64Column("Y") - pt.AddStringColumn("Label") - nextY := 0.5 - root.SetYs(&nextY) - root.SetParDist(0.0) - root.Plot(pt, smat) -} - -// Plot sets the rows of given data table to trace out lines with labels that -// will render this node in a cluster plot when plotted with a standard plotting package. -// The lines double-back on themselves to form a continuous line to be plotted. -func (nn *Node) Plot(pt *table.Table, smat *simat.SimMat) { - row := pt.Rows - if nn.IsLeaf() { - pt.SetNumRows(row + 1) - pt.SetFloatIndex(0, row, nn.ParDist) - pt.SetFloatIndex(1, row, nn.Y) - if len(smat.Rows) > nn.Index { - pt.SetStringIndex(2, row, smat.Rows[nn.Index]) - } - } else { - for _, kn := range nn.Kids { - pt.SetNumRows(row + 2) - pt.SetFloatIndex(0, row, nn.ParDist) - pt.SetFloatIndex(1, row, kn.Y) - row++ - pt.SetFloatIndex(0, row, nn.ParDist+nn.Dist) - pt.SetFloatIndex(1, row, kn.Y) - kn.Plot(pt, smat) - row = pt.Rows - pt.SetNumRows(row + 1) - pt.SetFloatIndex(0, row, nn.ParDist) - pt.SetFloatIndex(1, row, kn.Y) - row++ - } - pt.SetNumRows(row + 1) - pt.SetFloatIndex(0, row, nn.ParDist) - pt.SetFloatIndex(1, row, nn.Y) - } -} - -// SetYs sets the Y-axis values for the nodes in preparation for plotting. -func (nn *Node) SetYs(nextY *float64) { - if nn.IsLeaf() { - nn.Y = *nextY - (*nextY) += 1.0 - } else { - avgy := 0.0 - for _, kn := range nn.Kids { - kn.SetYs(nextY) - avgy += kn.Y - } - avgy /= float64(len(nn.Kids)) - nn.Y = avgy - } -} - -// SetParDist sets the parent distance for the nodes in preparation for plotting. 
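Continuing the clustering sketch, the removed Plot function writes X, Y, Label rows that trace out the dendrogram into a table, which can then be drawn as an ordinary line plot:

pt := &table.Table{}
clust.Plot(pt, root, smat) // root and smat from the clustering sketch above
fmt.Println(pt.Rows)       // number of trace points generated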
-func (nn *Node) SetParDist(pard float64) { - nn.ParDist = pard - if !nn.IsLeaf() { - pard += nn.Dist - for _, kn := range nn.Kids { - kn.SetParDist(pard) - } - } -} diff --git a/tensor/stats/clust/testdata/faces.dat b/tensor/stats/clust/testdata/faces.dat deleted file mode 100644 index 53912dc13a..0000000000 --- a/tensor/stats/clust/testdata/faces.dat +++ /dev/null @@ -1,13 +0,0 @@ -_H: $Name %Input[2:0,0]<2:16,16> %Input[2:0,1] %Input[2:0,2] %Input[2:0,3] %Input[2:0,4] %Input[2:0,5] %Input[2:0,6] %Input[2:0,7] %Input[2:0,8] %Input[2:0,9] %Input[2:0,10] %Input[2:0,11] %Input[2:0,12] %Input[2:0,13] %Input[2:0,14] %Input[2:0,15] %Input[2:1,0] %Input[2:1,1] %Input[2:1,2] %Input[2:1,3] %Input[2:1,4] %Input[2:1,5] %Input[2:1,6] %Input[2:1,7] %Input[2:1,8] %Input[2:1,9] %Input[2:1,10] %Input[2:1,11] %Input[2:1,12] %Input[2:1,13] %Input[2:1,14] %Input[2:1,15] %Input[2:2,0] %Input[2:2,1] %Input[2:2,2] %Input[2:2,3] %Input[2:2,4] %Input[2:2,5] %Input[2:2,6] %Input[2:2,7] %Input[2:2,8] %Input[2:2,9] %Input[2:2,10] %Input[2:2,11] %Input[2:2,12] %Input[2:2,13] %Input[2:2,14] %Input[2:2,15] %Input[2:3,0] %Input[2:3,1] %Input[2:3,2] %Input[2:3,3] %Input[2:3,4] %Input[2:3,5] %Input[2:3,6] %Input[2:3,7] %Input[2:3,8] %Input[2:3,9] %Input[2:3,10] %Input[2:3,11] %Input[2:3,12] %Input[2:3,13] %Input[2:3,14] %Input[2:3,15] %Input[2:4,0] %Input[2:4,1] %Input[2:4,2] %Input[2:4,3] %Input[2:4,4] %Input[2:4,5] %Input[2:4,6] %Input[2:4,7] %Input[2:4,8] %Input[2:4,9] %Input[2:4,10] %Input[2:4,11] %Input[2:4,12] %Input[2:4,13] %Input[2:4,14] %Input[2:4,15] %Input[2:5,0] %Input[2:5,1] %Input[2:5,2] %Input[2:5,3] %Input[2:5,4] %Input[2:5,5] %Input[2:5,6] %Input[2:5,7] %Input[2:5,8] %Input[2:5,9] %Input[2:5,10] %Input[2:5,11] %Input[2:5,12] %Input[2:5,13] %Input[2:5,14] %Input[2:5,15] %Input[2:6,0] %Input[2:6,1] %Input[2:6,2] %Input[2:6,3] %Input[2:6,4] %Input[2:6,5] %Input[2:6,6] %Input[2:6,7] %Input[2:6,8] %Input[2:6,9] %Input[2:6,10] %Input[2:6,11] %Input[2:6,12] %Input[2:6,13] %Input[2:6,14] %Input[2:6,15] %Input[2:7,0] %Input[2:7,1] %Input[2:7,2] %Input[2:7,3] %Input[2:7,4] %Input[2:7,5] %Input[2:7,6] %Input[2:7,7] %Input[2:7,8] %Input[2:7,9] %Input[2:7,10] %Input[2:7,11] %Input[2:7,12] %Input[2:7,13] %Input[2:7,14] %Input[2:7,15] %Input[2:8,0] %Input[2:8,1] %Input[2:8,2] %Input[2:8,3] %Input[2:8,4] %Input[2:8,5] %Input[2:8,6] %Input[2:8,7] %Input[2:8,8] %Input[2:8,9] %Input[2:8,10] %Input[2:8,11] %Input[2:8,12] %Input[2:8,13] %Input[2:8,14] %Input[2:8,15] %Input[2:9,0] %Input[2:9,1] %Input[2:9,2] %Input[2:9,3] %Input[2:9,4] %Input[2:9,5] %Input[2:9,6] %Input[2:9,7] %Input[2:9,8] %Input[2:9,9] %Input[2:9,10] %Input[2:9,11] %Input[2:9,12] %Input[2:9,13] %Input[2:9,14] %Input[2:9,15] %Input[2:10,0] %Input[2:10,1] %Input[2:10,2] %Input[2:10,3] %Input[2:10,4] %Input[2:10,5] %Input[2:10,6] %Input[2:10,7] %Input[2:10,8] %Input[2:10,9] %Input[2:10,10] %Input[2:10,11] %Input[2:10,12] %Input[2:10,13] %Input[2:10,14] %Input[2:10,15] %Input[2:11,0] %Input[2:11,1] %Input[2:11,2] %Input[2:11,3] %Input[2:11,4] %Input[2:11,5] %Input[2:11,6] %Input[2:11,7] %Input[2:11,8] %Input[2:11,9] %Input[2:11,10] %Input[2:11,11] %Input[2:11,12] %Input[2:11,13] %Input[2:11,14] %Input[2:11,15] %Input[2:12,0] %Input[2:12,1] %Input[2:12,2] %Input[2:12,3] %Input[2:12,4] %Input[2:12,5] %Input[2:12,6] %Input[2:12,7] %Input[2:12,8] %Input[2:12,9] %Input[2:12,10] %Input[2:12,11] %Input[2:12,12] %Input[2:12,13] %Input[2:12,14] %Input[2:12,15] %Input[2:13,0] %Input[2:13,1] %Input[2:13,2] %Input[2:13,3] %Input[2:13,4] %Input[2:13,5] %Input[2:13,6] 
%Input[2:13,7] %Input[2:13,8] %Input[2:13,9] %Input[2:13,10] %Input[2:13,11] %Input[2:13,12] %Input[2:13,13] %Input[2:13,14] %Input[2:13,15] %Input[2:14,0] %Input[2:14,1] %Input[2:14,2] %Input[2:14,3] %Input[2:14,4] %Input[2:14,5] %Input[2:14,6] %Input[2:14,7] %Input[2:14,8] %Input[2:14,9] %Input[2:14,10] %Input[2:14,11] %Input[2:14,12] %Input[2:14,13] %Input[2:14,14] %Input[2:14,15] %Input[2:15,0] %Input[2:15,1] %Input[2:15,2] %Input[2:15,3] %Input[2:15,4] %Input[2:15,5] %Input[2:15,6] %Input[2:15,7] %Input[2:15,8] %Input[2:15,9] %Input[2:15,10] %Input[2:15,11] %Input[2:15,12] %Input[2:15,13] %Input[2:15,14] %Input[2:15,15] %Emotion[2:0,0]<2:1,2> %Emotion[2:0,1] %Gender[2:0,0]<2:1,2> %Gender[2:0,1] %Identity[2:0,0]<2:1,10> %Identity[2:0,1] %Identity[2:0,2] %Identity[2:0,3] %Identity[2:0,4] %Identity[2:0,5] %Identity[2:0,6] %Identity[2:0,7] %Identity[2:0,8] %Identity[2:0,9] -_D: Alberto_happy 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 1 0 0 0 0 1 1 0 0 0 0 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 1 0 1 0 1 0 0 1 0 1 0 1 0 0 0 0 1 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 1 0 0 0 0 0 0 0 0 0 -_D: Alberto_sad 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 1 0 0 0 0 1 1 0 0 0 0 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 1 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 1 0 1 1 0 0 0 0 1 1 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1 0 0 0 0 0 0 0 0 0 -_D: Betty_happy 1 0 1 0 0 0 1 1 1 1 0 0 0 1 0 1 0 1 0 1 0 1 0 0 0 0 1 0 1 0 1 0 0 0 1 0 1 0 0 1 1 0 0 1 0 1 0 0 0 1 0 1 0 0 1 0 0 1 0 1 0 0 1 0 0 0 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 1 0 1 0 1 0 1 0 1 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 1 0 0 0 0 0 0 0 0 -_D: Betty_sad 1 0 1 0 0 0 1 1 1 1 0 0 0 1 0 1 0 1 0 1 0 1 0 0 0 0 1 0 1 0 1 0 0 0 1 0 1 0 1 0 0 1 0 1 0 1 0 0 0 1 0 1 0 0 0 1 1 0 0 1 0 0 1 0 0 0 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 1 1 1 0 0 0 1 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 0 0 0 0 0 0 0 -_D: Lisa_happy 1 0 1 0 0 0 1 1 1 1 0 0 0 1 0 1 0 1 0 1 0 1 0 0 0 0 1 0 1 0 1 0 0 0 1 0 1 0 0 1 1 0 0 1 0 1 0 0 0 1 0 1 0 0 1 0 0 1 0 1 0 0 1 0 0 0 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 1 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 1 1 1 0 1 0 1 0 1 0 1 1 0 0 0 0 0 1 0 1 0 0 0 1 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 
1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 0 -_D: Lisa_sad 1 0 1 0 0 0 1 1 1 1 0 0 0 1 0 1 0 1 0 1 0 1 0 0 0 0 1 0 1 0 1 0 0 0 1 0 1 0 1 0 0 1 0 1 0 1 0 0 0 1 0 1 0 0 0 1 1 0 0 1 0 0 1 0 0 0 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 1 1 1 0 0 0 1 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 0 -_D: Mark_happy 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 1 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 1 0 1 0 1 0 0 1 0 1 0 1 1 0 0 0 1 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 -_D: Mark_sad 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 1 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 1 0 0 0 1 0 0 1 0 0 0 1 1 0 0 0 1 0 1 1 0 0 0 0 1 1 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 -_D: Wendy_happy 0 1 1 1 0 0 1 1 1 1 0 0 1 1 1 0 0 0 0 1 0 1 0 0 0 0 1 0 1 0 0 0 0 0 1 0 1 0 0 1 1 0 0 1 0 1 0 0 0 0 0 1 0 0 1 0 0 1 0 1 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 1 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 1 1 1 0 1 0 1 0 1 0 1 1 0 0 0 0 0 1 0 1 0 0 0 1 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 1 0 0 0 0 0 -_D: Wendy_sad 0 1 1 1 0 0 1 1 1 1 0 0 1 1 1 0 0 0 0 1 0 1 0 0 0 0 1 1 1 0 0 0 0 0 1 0 1 0 1 0 0 1 0 1 0 1 0 0 0 0 0 1 0 0 0 1 1 0 0 1 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 1 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 1 1 0 0 1 0 1 0 0 0 1 1 0 0 0 0 0 1 1 1 0 0 0 1 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 -_D: Zane_happy 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 1 0 0 1 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 1 0 0 0 0 1 1 0 0 0 0 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 1 0 1 0 1 0 0 1 0 1 0 1 0 0 0 0 1 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 -_D: Zane_sad 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0 1 0 0 1 0 
0 0 0 0 0 1 0 0 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 1 0 0 0 0 1 1 0 0 0 0 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 1 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 1 0 1 1 0 0 0 0 1 1 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 1 0 1 0 0 0 0 0 1 0 0 0 0 diff --git a/tensor/stats/convolve/README.md b/tensor/stats/convolve/README.md deleted file mode 100644 index 2bb7cb632f..0000000000 --- a/tensor/stats/convolve/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# convolve - -`convolve.Slice32` operates on `[]float32` data, convolving data with a kernel, while `convolve.Slice64` operates on `[]float64` data. - -`SmoothTable` operates on an entire `table.Table` set of columns. - diff --git a/tensor/stats/convolve/convolve.go b/tensor/stats/convolve/convolve.go deleted file mode 100644 index e52a828296..0000000000 --- a/tensor/stats/convolve/convolve.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package convolve - -//go:generate core generate - -import ( - "errors" - - "cogentcore.org/core/base/slicesx" -) - -// Slice32 convolves given kernel with given source slice, putting results in -// destination, which is ensured to be the same size as the source slice, -// using existing capacity if available, and otherwise making a new slice. -// The kernel should be normalized, and odd-sized do it is symmetric about 0. -// Returns an error if sizes are not valid. -// No parallelization is used -- see Slice32Parallel for very large slices. -// Edges are handled separately with renormalized kernels -- they can be -// clipped from dest by excluding the kernel half-width from each end. -func Slice32(dest *[]float32, src []float32, kern []float32) error { - sz := len(src) - ksz := len(kern) - if ksz == 0 || sz == 0 { - return errors.New("convolve.Slice32: kernel or source are empty") - } - if ksz%2 == 0 { - return errors.New("convolve.Slice32: kernel is not odd sized") - } - if sz < ksz { - return errors.New("convolve.Slice32: source must be > kernel in size") - } - khalf := (ksz - 1) / 2 - *dest = slicesx.SetLength(*dest, sz) - for i := khalf; i < sz-khalf; i++ { - var sum float32 - for j := 0; j < ksz; j++ { - sum += src[(i-khalf)+j] * kern[j] - } - (*dest)[i] = sum - } - for i := 0; i < khalf; i++ { - var sum, ksum float32 - for j := 0; j <= khalf+i; j++ { - ki := (j + khalf) - i // 0: 1+kh, 1: etc - si := i + (ki - khalf) - // fmt.Printf("i: %d j: %d ki: %d si: %d\n", i, j, ki, si) - sum += src[si] * kern[ki] - ksum += kern[ki] - } - (*dest)[i] = sum / ksum - } - for i := sz - khalf; i < sz; i++ { - var sum, ksum float32 - ei := sz - i - 1 - for j := 0; j <= khalf+ei; j++ { - ki := ((ksz - 1) - (j + khalf)) + ei - si := i + (ki - khalf) - // fmt.Printf("i: %d j: %d ki: %d si: %d ei: %d\n", i, j, ki, si, ei) - sum += src[si] * kern[ki] - ksum += kern[ki] - } - (*dest)[i] = sum / ksum - } - return nil -} - -// Slice64 convolves given kernel with given source slice, putting results in -// destination, which is ensured to be the same size as the source slice, -// using existing capacity if available, and otherwise making a new slice. -// The kernel should be normalized, and odd-sized do it is symmetric about 0. -// Returns an error if sizes are not valid. 
-// No parallelization is used -- see Slice64Parallel for very large slices. -// Edges are handled separately with renormalized kernels -- they can be -// clipped from dest by excluding the kernel half-width from each end. -func Slice64(dest *[]float64, src []float64, kern []float64) error { - sz := len(src) - ksz := len(kern) - if ksz == 0 || sz == 0 { - return errors.New("convolve.Slice64: kernel or source are empty") - } - if ksz%2 == 0 { - return errors.New("convolve.Slice64: kernel is not odd sized") - } - if sz < ksz { - return errors.New("convolve.Slice64: source must be > kernel in size") - } - khalf := (ksz - 1) / 2 - *dest = slicesx.SetLength(*dest, sz) - for i := khalf; i < sz-khalf; i++ { - var sum float64 - for j := 0; j < ksz; j++ { - sum += src[(i-khalf)+j] * kern[j] - } - (*dest)[i] = sum - } - for i := 0; i < khalf; i++ { - var sum, ksum float64 - for j := 0; j <= khalf+i; j++ { - ki := (j + khalf) - i // 0: 1+kh, 1: etc - si := i + (ki - khalf) - // fmt.Printf("i: %d j: %d ki: %d si: %d\n", i, j, ki, si) - sum += src[si] * kern[ki] - ksum += kern[ki] - } - (*dest)[i] = sum / ksum - } - for i := sz - khalf; i < sz; i++ { - var sum, ksum float64 - ei := sz - i - 1 - for j := 0; j <= khalf+ei; j++ { - ki := ((ksz - 1) - (j + khalf)) + ei - si := i + (ki - khalf) - // fmt.Printf("i: %d j: %d ki: %d si: %d ei: %d\n", i, j, ki, si, ei) - sum += src[si] * kern[ki] - ksum += kern[ki] - } - (*dest)[i] = sum / ksum - } - return nil -} diff --git a/tensor/stats/convolve/convolve_test.go b/tensor/stats/convolve/convolve_test.go deleted file mode 100644 index 4bd1bff009..0000000000 --- a/tensor/stats/convolve/convolve_test.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package convolve - -import ( - "testing" - - "cogentcore.org/core/math32" -) - -func TestConv32(t *testing.T) { - kern := GaussianKernel32(2, .5) - // fmt.Printf("kern: %v\n", kern) - sz := 20 - src := make([]float32, sz) - for i := range src { - src[i] = float32(i) - } - var dest []float32 - Slice32(&dest, src, kern) - khalf := (len(kern) - 1) / 2 - for i := range src { - if i >= khalf && i < (sz-1-khalf) { - err := math32.Abs(src[i] - float32(i)) - if err > 1.0e-7 { - t.Errorf("error: %d:\t%g\t->\t%g\n", i, src[i], dest[i]) - } - } - // fmt.Printf("%d:\t%g\t->\t%g\n", i, src[i], dest[i]) - } -} diff --git a/tensor/stats/convolve/kernel.go b/tensor/stats/convolve/kernel.go deleted file mode 100644 index e93f55eab3..0000000000 --- a/tensor/stats/convolve/kernel.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package convolve - -import ( - "math" - - "cogentcore.org/core/math32" -) - -// GaussianKernel32 returns a normalized gaussian kernel for smoothing -// with given half-width and normalized sigma (actual sigma = khalf * sigma). -// A sigma value of .5 is typical for smaller half-widths for containing -// most of the gaussian efficiently -- anything lower than .33 is inefficient -- -// generally just use a lower half-width instead. 
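An illustrative sketch of smoothing a []float64 with the removed convolution functions, using a normalized Gaussian kernel of half-width 2 and sigma 0.5:

kern := convolve.GaussianKernel64(2, 0.5)
src := []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
var dest []float64
if err := convolve.Slice64(&dest, src, kern); err != nil {
	log.Fatal(err)
}
// The interior of a linear ramp is reproduced almost exactly; the first and
// last khalf points are computed with renormalized edge kernels.
fmt.Println(dest)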
-func GaussianKernel32(khalf int, sigma float32) []float32 { - ksz := khalf*2 + 1 - kern := make([]float32, ksz) - sigdiv := 1 / (sigma * float32(khalf)) - var sum float32 - for i := 0; i < ksz; i++ { - x := sigdiv * float32(i-khalf) - kv := math32.Exp(-0.5 * x * x) - kern[i] = kv - sum += kv - } - nfac := 1 / sum - for i := 0; i < ksz; i++ { - kern[i] *= nfac - } - return kern -} - -// GaussianKernel64 returns a normalized gaussian kernel -// with given half-width and normalized sigma (actual sigma = khalf * sigma) -// A sigma value of .5 is typical for smaller half-widths for containing -// most of the gaussian efficiently -- anything lower than .33 is inefficient -- -// generally just use a lower half-width instead. -func GaussianKernel64(khalf int, sigma float64) []float64 { - ksz := khalf*2 + 1 - kern := make([]float64, ksz) - sigdiv := 1 / (sigma * float64(khalf)) - var sum float64 - for i := 0; i < ksz; i++ { - x := sigdiv * float64(i-khalf) - kv := math.Exp(-0.5 * x * x) - kern[i] = kv - sum += kv - } - nfac := 1 / sum - for i := 0; i < ksz; i++ { - kern[i] *= nfac - } - return kern -} diff --git a/tensor/stats/convolve/table.go b/tensor/stats/convolve/table.go deleted file mode 100644 index 1381e21ef7..0000000000 --- a/tensor/stats/convolve/table.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package convolve - -import ( - "reflect" - - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/table" -) - -// SmoothTable returns a cloned table with each of the floating-point -// columns in the source table smoothed over rows. -// khalf is the half-width of the Gaussian smoothing kernel, -// where larger values produce more smoothing. A sigma of .5 -// is used for the kernel. -func SmoothTable(src *table.Table, khalf int) *table.Table { - k64 := GaussianKernel64(khalf, .5) - k32 := GaussianKernel32(khalf, .5) - dest := src.Clone() - for ci, sci := range src.Columns { - switch sci.DataType() { - case reflect.Float32: - sc := sci.(*tensor.Float32) - dc := dest.Columns[ci].(*tensor.Float32) - Slice32(&dc.Values, sc.Values, k32) - case reflect.Float64: - sc := sci.(*tensor.Float64) - dc := dest.Columns[ci].(*tensor.Float64) - Slice64(&dc.Values, sc.Values, k64) - } - } - return dest -} diff --git a/tensor/stats/glm/README.md b/tensor/stats/glm/README.md deleted file mode 100644 index ffe5d5703d..0000000000 --- a/tensor/stats/glm/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# GLM = general linear model - -GLM contains results and parameters for running a [general linear model](https://en.wikipedia.org/wiki/General_linear_model), which is a general form of multivariate linear regression, supporting multiple independent and dependent variables. - -Make a `NewGLM` and then do `Run()` on a tensor [IndexView](../table/IndexView) with the relevant data in columns of the table. - -# Fitting Methods - -## Standard QR Decomposition - -The standard algorithm involves eigenvalue computation using [QR Decomposition](https://en.wikipedia.org/wiki/QR_decomposition). TODO. - -## Iterative Batch Mode Least Squares - -Batch-mode gradient descent is used and the relevant parameters can be altered from defaults before calling Run as needed. 
- -This mode supports [Ridge](https://en.wikipedia.org/wiki/Ridge_regression) (L2 norm) and [Lasso](https://en.wikipedia.org/wiki/Lasso_(statistics)) (L1 norm) forms of regression, which add different forms of weight decay to the LMS cost function. - diff --git a/tensor/stats/glm/glm.go b/tensor/stats/glm/glm.go deleted file mode 100644 index cbac41a520..0000000000 --- a/tensor/stats/glm/glm.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package glm - -import ( - "fmt" - "math" - - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/table" -) - -// todo: add tests - -// GLM contains results and parameters for running a general -// linear model, which is a general form of multivariate linear -// regression, supporting multiple independent and dependent -// variables. Make a NewGLM and then do Run() on a tensor -// table.IndexView with the relevant data in columns of the table. -// Batch-mode gradient descent is used and the relevant parameters -// can be altered from defaults before calling Run as needed. -type GLM struct { - // Coeff are the coefficients to map from input independent variables - // to the dependent variables. The first, outer dimension is number of - // dependent variables, and the second, inner dimension is number of - // independent variables plus one for the offset (b) (last element). - Coeff tensor.Float64 - - // mean squared error of the fitted values relative to data - MSE float64 - - // R2 is the r^2 total variance accounted for by the linear model, - // for each dependent variable = 1 - (ErrVariance / ObsVariance) - R2 []float64 - - // Observed variance of each of the dependent variables to be predicted. - ObsVariance []float64 - - // Variance of the error residuals per dependent variables - ErrVariance []float64 - - // optional names of the independent variables, for reporting results - IndepNames []string - - // optional names of the dependent variables, for reporting results - DepNames []string - - /////////////////////////////////////////// - // Parameters for the GLM model fitting: - - // ZeroOffset restricts the offset of the linear function to 0, - // forcing it to pass through the origin. Otherwise, a constant offset "b" - // is fit during the model fitting process. - ZeroOffset bool - - // learning rate parameter, which can be adjusted to reduce iterations based on - // specific properties of the data, but the default is reasonable for most "typical" data. - LRate float64 `default:"0.1"` - - // tolerance on difference in mean squared error (MSE) across iterations to stop - // iterating and consider the result to be converged. - StopTolerance float64 `default:"0.0001"` - - // Constant cost factor subtracted from weights, for the L1 norm or "Lasso" - // regression. This is good for producing sparse results but can arbitrarily - // select one of multiple correlated independent variables. - L1Cost float64 - - // Cost factor proportional to the coefficient value, for the L2 norm or "Ridge" - // regression. This is good for generally keeping weights small and equally - // penalizes correlated independent variables. - L2Cost float64 - - // CostStartIter is the iteration when we start applying the L1, L2 Cost factors. - // It is often a good idea to have a few unconstrained iterations prior to - // applying the cost factors. 
- CostStartIter int `default:"5"` - - // maximum number of iterations to perform - MaxIters int `default:"50"` - - /////////////////////////////////////////// - // Cached values from the table - - // Table of data - Table *table.IndexView - - // tensor columns from table with the respective variables - IndepVars, DepVars, PredVars, ErrVars tensor.Tensor - - // Number of independent and dependent variables - NIndepVars, NDepVars int -} - -func NewGLM() *GLM { - glm := &GLM{} - glm.Defaults() - return glm -} - -func (glm *GLM) Defaults() { - glm.LRate = 0.1 - glm.StopTolerance = 0.001 - glm.MaxIters = 50 - glm.CostStartIter = 5 -} - -func (glm *GLM) init(nIv, nDv int) { - glm.NIndepVars = nIv - glm.NDepVars = nDv - glm.Coeff.SetShape([]int{nDv, nIv + 1}, "DepVars", "IndepVars") - glm.R2 = make([]float64, nDv) - glm.ObsVariance = make([]float64, nDv) - glm.ErrVariance = make([]float64, nDv) - glm.IndepNames = make([]string, nIv) - glm.DepNames = make([]string, nDv) -} - -// SetTable sets the data to use from given indexview of table, where -// each of the Vars args specifies a column in the table, which can have either a -// single scalar value for each row, or a tensor cell with multiple values. -// predVars and errVars (predicted values and error values) are optional. -func (glm *GLM) SetTable(ix *table.IndexView, indepVars, depVars, predVars, errVars string) error { - dt := ix.Table - iv, err := dt.ColumnByName(indepVars) - if err != nil { - return err - } - dv, err := dt.ColumnByName(depVars) - if err != nil { - return err - } - var pv, ev tensor.Tensor - if predVars != "" { - pv, err = dt.ColumnByName(predVars) - if err != nil { - return err - } - } - if errVars != "" { - ev, err = dt.ColumnByName(errVars) - if err != nil { - return err - } - } - if pv != nil && !pv.Shape().IsEqual(dv.Shape()) { - return fmt.Errorf("predVars must have same shape as depVars") - } - if ev != nil && !ev.Shape().IsEqual(dv.Shape()) { - return fmt.Errorf("errVars must have same shape as depVars") - } - _, nIv := iv.RowCellSize() - _, nDv := dv.RowCellSize() - glm.init(nIv, nDv) - glm.Table = ix - glm.IndepVars = iv - glm.DepVars = dv - glm.PredVars = pv - glm.ErrVars = ev - return nil -} - -// Run performs the multi-variate linear regression using data SetTable function, -// learning linear coefficients and an overall static offset that best -// fits the observed dependent variables as a function of the independent variables. -// Initial values of the coefficients, and other parameters for the regression, -// should be set prior to running. 
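An illustrative sketch of fitting a model with the removed GLM API, assuming a table dt with an independent column "X" and a dependent column "Y" (the column names are hypothetical, not from the diff):

ix := table.NewIndexView(dt)
g := glm.NewGLM()
g.L2Cost = 0.01 // optional ridge penalty; L1Cost would add lasso-style sparsity
if err := g.SetTable(ix, "X", "Y", "", ""); err != nil {
	log.Fatal(err)
}
g.Run()
fmt.Print(g.Variance()) // R^2 and residual variance per dependent variable
fmt.Print(g.Coeffs())   // fitted coefficients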
-func (glm *GLM) Run() { - ix := glm.Table - iv := glm.IndepVars - dv := glm.DepVars - pv := glm.PredVars - ev := glm.ErrVars - - if pv == nil { - pv = dv.Clone() - } - if ev == nil { - ev = dv.Clone() - } - - nDv := glm.NDepVars - nIv := glm.NIndepVars - nCi := nIv + 1 - - dc := glm.Coeff.Clone().(*tensor.Float64) - - lastItr := false - sse := 0.0 - prevmse := 0.0 - n := ix.Len() - norm := 1.0 / float64(n) - lrate := norm * glm.LRate - for itr := 0; itr < glm.MaxIters; itr++ { - for i := range dc.Values { - dc.Values[i] = 0 - } - sse = 0 - if (itr+1)%10 == 0 { - lrate *= 0.5 - } - for i := 0; i < n; i++ { - row := ix.Indexes[i] - for di := 0; di < nDv; di++ { - pred := 0.0 - for ii := 0; ii < nIv; ii++ { - pred += glm.Coeff.Value([]int{di, ii}) * iv.FloatRowCell(row, ii) - } - if !glm.ZeroOffset { - pred += glm.Coeff.Value([]int{di, nIv}) - } - targ := dv.FloatRowCell(row, di) - err := targ - pred - sse += err * err - for ii := 0; ii < nIv; ii++ { - dc.Values[di*nCi+ii] += err * iv.FloatRowCell(row, ii) - } - if !glm.ZeroOffset { - dc.Values[di*nCi+nIv] += err - } - if lastItr { - pv.SetFloatRowCell(row, di, pred) - if ev != nil { - ev.SetFloatRowCell(row, di, err) - } - } - } - } - for di := 0; di < nDv; di++ { - for ii := 0; ii <= nIv; ii++ { - if glm.ZeroOffset && ii == nIv { - continue - } - idx := di*(nCi+1) + ii - w := glm.Coeff.Values[idx] - d := dc.Values[idx] - sgn := 1.0 - if w < 0 { - sgn = -1.0 - } else if w == 0 { - sgn = 0 - } - glm.Coeff.Values[idx] += lrate * (d - glm.L1Cost*sgn - glm.L2Cost*w) - } - } - glm.MSE = norm * sse - if lastItr { - break - } - if itr > 0 { - dmse := glm.MSE - prevmse - if math.Abs(dmse) < glm.StopTolerance || itr == glm.MaxIters-2 { - lastItr = true - } - } - fmt.Println(itr, glm.MSE) - prevmse = glm.MSE - } - - obsMeans := make([]float64, nDv) - errMeans := make([]float64, nDv) - for i := 0; i < n; i++ { - row := ix.Indexes[i] - for di := 0; di < nDv; di++ { - obsMeans[di] += dv.FloatRowCell(row, di) - errMeans[di] += ev.FloatRowCell(row, di) - } - } - for di := 0; di < nDv; di++ { - obsMeans[di] *= norm - errMeans[di] *= norm - glm.ObsVariance[di] = 0 - glm.ErrVariance[di] = 0 - } - for i := 0; i < n; i++ { - row := ix.Indexes[i] - for di := 0; di < nDv; di++ { - o := dv.FloatRowCell(row, di) - obsMeans[di] - glm.ObsVariance[di] += o * o - e := ev.FloatRowCell(row, di) - errMeans[di] - glm.ErrVariance[di] += e * e - } - } - for di := 0; di < nDv; di++ { - glm.ObsVariance[di] *= norm - glm.ErrVariance[di] *= norm - glm.R2[di] = 1.0 - (glm.ErrVariance[di] / glm.ObsVariance[di]) - } -} - -// Variance returns a description of the variance accounted for by the regression -// equation, R^2, for each dependent variable, along with the variances of -// observed and errors (residuals), which are used to compute it. 
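Since the glm package being removed here shipped without tests (note the "todo: add tests" above), a minimal usage sketch of the deleted API may be worth recording. The column names and data are made up, and table.NewIndexView is an assumption about the table package, not something shown in this diff:

package main

import (
	"fmt"

	"cogentcore.org/core/tensor"
	"cogentcore.org/core/tensor/stats/glm"
	"cogentcore.org/core/tensor/table"
)

func main() {
	// Hypothetical table with one independent ("X") and one dependent ("Y") column,
	// generated as Y = 2*X + 1 with no noise.
	dt := table.NewTable()
	dt.AddFloat64Column("X")
	dt.AddFloat64Column("Y")
	n := 20
	dt.SetNumRows(n)
	xs := dt.Columns[0].(*tensor.Float64).Values
	ys := dt.Columns[1].(*tensor.Float64).Values
	for i := 0; i < n; i++ {
		x := float64(i) / float64(n)
		xs[i] = x
		ys[i] = 2*x + 1
	}

	g := glm.NewGLM()
	ix := table.NewIndexView(dt) // assumed helper for wrapping a table in an IndexView
	if err := g.SetTable(ix, "X", "Y", "", ""); err != nil {
		fmt.Println(err)
		return
	}
	g.Run()
	// Coeff is [nDepVars x (nIndepVars+1)]: slope then offset; these should move
	// toward the generating slope 2 and offset 1 (exact values depend on iteration limits).
	fmt.Println("coeffs:", g.Coeff.Values)
	fmt.Println("R^2:", g.R2)
}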
-func (glm *GLM) Variance() string { - str := "" - for di := range glm.R2 { - if len(glm.DepNames) > di && glm.DepNames[di] != "" { - str += glm.DepNames[di] - } else { - str += fmt.Sprintf("DV %d", di) - } - str += fmt.Sprintf("\tR^2: %8.6g\tR: %8.6g\tVar Err: %8.4g\t Obs: %8.4g\n", glm.R2[di], math.Sqrt(glm.R2[di]), glm.ErrVariance[di], glm.ObsVariance[di]) - } - return str -} - -// Coeffs returns a string describing the coefficients -func (glm *GLM) Coeffs() string { - str := "" - for di := range glm.NDepVars { - if len(glm.DepNames) > di && glm.DepNames[di] != "" { - str += glm.DepNames[di] - } else { - str += fmt.Sprintf("DV %d", di) - } - str += " = " - for ii := 0; ii <= glm.NIndepVars; ii++ { - str += fmt.Sprintf("\t%8.6g", glm.Coeff.Value([]int{di, ii})) - if ii < glm.NIndepVars { - str += " * " - if len(glm.IndepNames) > ii && glm.IndepNames[di] != "" { - str += glm.IndepNames[di] - } else { - str += fmt.Sprintf("IV_%d", ii) - } - str += " + " - } - } - str += "\n" - } - return str -} diff --git a/tensor/stats/histogram/README.md b/tensor/stats/histogram/README.md deleted file mode 100644 index 0f716a5c80..0000000000 --- a/tensor/stats/histogram/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# histogram - -This package computes a histogram of values in either a `[]float32` or `[]float64` array: a count of number of values within different bins of value ranges. - -`F32` or `F64` computes the raw counts into a corresponding float array, and `F32Table` or `F64Table` construct an `table.Table` with Value and Count columns, suitable for plotting. - -The Value column represents the min value for each bin, with the max being, the value of the next bin, or the max if at the end. - diff --git a/tensor/stats/histogram/histogram.go b/tensor/stats/histogram/histogram.go deleted file mode 100644 index 3cf7e73c3a..0000000000 --- a/tensor/stats/histogram/histogram.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package histogram - -//go:generate core generate - -import ( - "cogentcore.org/core/base/slicesx" - "cogentcore.org/core/math32" - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/table" -) - -// F64 generates a histogram of counts of values within given -// number of bins and min / max range. hist vals is sized to nBins. -// if value is < min or > max it is ignored. -func F64(hist *[]float64, vals []float64, nBins int, min, max float64) { - *hist = slicesx.SetLength(*hist, nBins) - h := *hist - // 0.1.2.3 = 3-0 = 4 bins - inc := (max - min) / float64(nBins) - for i := 0; i < nBins; i++ { - h[i] = 0 - } - for _, v := range vals { - if v < min || v > max { - continue - } - bin := int((v - min) / inc) - if bin >= nBins { - bin = nBins - 1 - } - h[bin] += 1 - } -} - -// F64Table generates an table with a histogram of counts of values within given -// number of bins and min / max range. The table has columns: Value, Count -// if value is < min or > max it is ignored. -// The Value column represents the min value for each bin, with the max being -// the value of the next bin, or the max if at the end. 
-func F64Table(dt *table.Table, vals []float64, nBins int, min, max float64) { - dt.DeleteAll() - dt.AddFloat64Column("Value") - dt.AddFloat64Column("Count") - dt.SetNumRows(nBins) - ct := dt.Columns[1].(*tensor.Float64) - F64(&ct.Values, vals, nBins, min, max) - inc := (max - min) / float64(nBins) - vls := dt.Columns[0].(*tensor.Float64).Values - for i := 0; i < nBins; i++ { - vls[i] = math32.Truncate64(min+float64(i)*inc, 4) - } -} - -////////////////////////////////////////////////////// -// float32 - -// F32 generates a histogram of counts of values within given -// number of bins and min / max range. hist vals is sized to nBins. -// if value is < min or > max it is ignored. -func F32(hist *[]float32, vals []float32, nBins int, min, max float32) { - *hist = slicesx.SetLength(*hist, nBins) - h := *hist - // 0.1.2.3 = 3-0 = 4 bins - inc := (max - min) / float32(nBins) - for i := 0; i < nBins; i++ { - h[i] = 0 - } - for _, v := range vals { - if v < min || v > max { - continue - } - bin := int((v - min) / inc) - if bin >= nBins { - bin = nBins - 1 - } - h[bin] += 1 - } -} - -// F32Table generates an table with a histogram of counts of values within given -// number of bins and min / max range. The table has columns: Value, Count -// if value is < min or > max it is ignored. -// The Value column represents the min value for each bin, with the max being -// the value of the next bin, or the max if at the end. -func F32Table(dt *table.Table, vals []float32, nBins int, min, max float32) { - dt.DeleteAll() - dt.AddFloat32Column("Value") - dt.AddFloat32Column("Count") - dt.SetNumRows(nBins) - ct := dt.Columns[1].(*tensor.Float32) - F32(&ct.Values, vals, nBins, min, max) - inc := (max - min) / float32(nBins) - vls := dt.Columns[0].(*tensor.Float32).Values - for i := 0; i < nBins; i++ { - vls[i] = math32.Truncate(min+float32(i)*inc, 4) - } -} diff --git a/tensor/stats/histogram/histogram_test.go b/tensor/stats/histogram/histogram_test.go deleted file mode 100644 index e2c31cee5e..0000000000 --- a/tensor/stats/histogram/histogram_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
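The removed tests below only cover in-range data, so a small sketch of the out-of-range behavior documented above (values below min or above max are simply ignored) may be useful; the numbers are illustrative:

package main

import (
	"fmt"

	"cogentcore.org/core/tensor/stats/histogram"
)

func main() {
	// -1 and 1.5 fall outside the [0, 1] range and are ignored.
	vals := []float64{-1, 0, 0.2, 0.5, 0.9, 1.5}
	var hist []float64
	histogram.F64(&hist, vals, 2, 0, 1)
	fmt.Println(hist) // [2 2]: {0, 0.2} in the first bin, {0.5, 0.9} in the second
}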
- -package histogram - -import ( - "testing" - - "cogentcore.org/core/tensor/table" - "github.com/stretchr/testify/assert" -) - -func TestHistogram32(t *testing.T) { - vals := []float32{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1} - ex := []float32{4, 3, 4} - res := []float32{} - - F32(&res, vals, 3, 0, 1) - - assert.Equal(t, ex, res) - - exvals := []float32{0, 0.3333, 0.6667} - dt := table.NewTable() - F32Table(dt, vals, 3, 0, 1) - for ri, v := range ex { - vv := float32(dt.Float("Value", ri)) - cv := float32(dt.Float("Count", ri)) - assert.Equal(t, exvals[ri], vv) - assert.Equal(t, v, cv) - } -} - -func TestHistogram64(t *testing.T) { - vals := []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1} - ex := []float64{4, 3, 4} - res := []float64{} - - F64(&res, vals, 3, 0, 1) - - assert.Equal(t, ex, res) - - exvals := []float64{0, 0.3333, 0.6667} - dt := table.NewTable() - F64Table(dt, vals, 3, 0, 1) - for ri, v := range ex { - vv := dt.Float("Value", ri) - cv := dt.Float("Count", ri) - assert.Equal(t, exvals[ri], vv) - assert.Equal(t, v, cv) - } -} diff --git a/tensor/stats/metric/README.md b/tensor/stats/metric/README.md deleted file mode 100644 index 3a09f6d033..0000000000 --- a/tensor/stats/metric/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# metric - -`metric` provides various similarity / distance metrics for comparing floating-point vectors. All functions have 32 and 64 bit variants, and skip NaN's (often used for missing) and will panic if the lengths of the two slices are unequal (no error return). - -The signatures of all such metric functions are identical, captured as types: `metric.Func32` and `metric.Func64` so that other functions that use a metric can take a pointer to any such function. - - diff --git a/tensor/stats/metric/abs.go b/tensor/stats/metric/abs.go deleted file mode 100644 index 7b0dbfe18b..0000000000 --- a/tensor/stats/metric/abs.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package metric - -import ( - "math" - - "cogentcore.org/core/math32" -) - -/////////////////////////////////////////// -// Abs - -// Abs32 computes the sum of absolute value of differences (L1 Norm). -// Skips NaN's and panics if lengths are not equal. -func Abs32(a, b []float32) float32 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float32(0) - for i, av := range a { - bv := b[i] - if math32.IsNaN(av) || math32.IsNaN(bv) { - continue - } - ss += math32.Abs(av - bv) - } - return ss -} - -// Abs64 computes the sum of absolute value of differences (L1 Norm). -// Skips NaN's and panics if lengths are not equal. -func Abs64(a, b []float64) float64 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float64(0) - for i, av := range a { - bv := b[i] - if math.IsNaN(av) || math.IsNaN(bv) { - continue - } - ss += math.Abs(av - bv) - } - return ss -} - -/////////////////////////////////////////// -// Hamming - -// Hamming32 computes the sum of 1's for every element that is different -// (city block). -// Skips NaN's and panics if lengths are not equal. 
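To make the NaN-as-missing convention described in the README concrete, a small sketch with made-up values:

package main

import (
	"fmt"
	"math"

	"cogentcore.org/core/tensor/stats/metric"
)

func main() {
	a := []float64{1, 2, math.NaN(), 4} // NaN marks a missing value
	b := []float64{1, 3, 7, 4}
	// The element with the missing value is skipped entirely, so only |2-3| contributes.
	fmt.Println(metric.Abs64(a, b)) // 1
}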
-func Hamming32(a, b []float32) float32 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float32(0) - for i, av := range a { - bv := b[i] - if math32.IsNaN(av) || math32.IsNaN(bv) { - continue - } - if av != bv { - ss += 1 - } - } - return ss -} - -// Hamming64 computes the sum of absolute value of differences (L1 Norm). -// Skips NaN's and panics if lengths are not equal. -func Hamming64(a, b []float64) float64 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float64(0) - for i, av := range a { - bv := b[i] - if math.IsNaN(av) || math.IsNaN(bv) { - continue - } - if av != bv { - ss += 1 - } - } - return ss -} diff --git a/tensor/stats/metric/doc.go b/tensor/stats/metric/doc.go deleted file mode 100644 index 724f01572b..0000000000 --- a/tensor/stats/metric/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package metric provides various similarity / distance metrics for comparing -floating-point vectors. -All functions have 32 and 64 bit variants, and skip NaN's (often used for missing) -and will panic if the lengths of the two slices are unequal (no error return). - -The signatures of all such metric functions are identical, captured as types: -metric.Func32 and metric.Func64 so that other functions that use a metric -can take a pointer to any such function. -*/ -package metric diff --git a/tensor/stats/metric/enumgen.go b/tensor/stats/metric/enumgen.go deleted file mode 100644 index e99276abeb..0000000000 --- a/tensor/stats/metric/enumgen.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by "core generate"; DO NOT EDIT. - -package metric - -import ( - "cogentcore.org/core/enums" -) - -var _StdMetricsValues = []StdMetrics{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12} - -// StdMetricsN is the highest valid value for type StdMetrics, plus one. -const StdMetricsN StdMetrics = 13 - -var _StdMetricsValueMap = map[string]StdMetrics{`Euclidean`: 0, `SumSquares`: 1, `Abs`: 2, `Hamming`: 3, `EuclideanBinTol`: 4, `SumSquaresBinTol`: 5, `InvCosine`: 6, `InvCorrelation`: 7, `CrossEntropy`: 8, `InnerProduct`: 9, `Covariance`: 10, `Correlation`: 11, `Cosine`: 12} - -var _StdMetricsDescMap = map[StdMetrics]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``, 5: ``, 6: `InvCosine is 1-Cosine -- useful to convert into an Increasing metric`, 7: `InvCorrelation is 1-Correlation -- useful to convert into an Increasing metric`, 8: ``, 9: `Everything below here is !Increasing -- larger = closer, not farther`, 10: ``, 11: ``, 12: ``} - -var _StdMetricsMap = map[StdMetrics]string{0: `Euclidean`, 1: `SumSquares`, 2: `Abs`, 3: `Hamming`, 4: `EuclideanBinTol`, 5: `SumSquaresBinTol`, 6: `InvCosine`, 7: `InvCorrelation`, 8: `CrossEntropy`, 9: `InnerProduct`, 10: `Covariance`, 11: `Correlation`, 12: `Cosine`} - -// String returns the string representation of this StdMetrics value. -func (i StdMetrics) String() string { return enums.String(i, _StdMetricsMap) } - -// SetString sets the StdMetrics value from its string representation, -// and returns an error if the string is invalid. -func (i *StdMetrics) SetString(s string) error { - return enums.SetString(i, s, _StdMetricsValueMap, "StdMetrics") -} - -// Int64 returns the StdMetrics value as an int64. -func (i StdMetrics) Int64() int64 { return int64(i) } - -// SetInt64 sets the StdMetrics value from an int64. 
-func (i *StdMetrics) SetInt64(in int64) { *i = StdMetrics(in) } - -// Desc returns the description of the StdMetrics value. -func (i StdMetrics) Desc() string { return enums.Desc(i, _StdMetricsDescMap) } - -// StdMetricsValues returns all possible values for the type StdMetrics. -func StdMetricsValues() []StdMetrics { return _StdMetricsValues } - -// Values returns all possible values for the type StdMetrics. -func (i StdMetrics) Values() []enums.Enum { return enums.Values(_StdMetricsValues) } - -// MarshalText implements the [encoding.TextMarshaler] interface. -func (i StdMetrics) MarshalText() ([]byte, error) { return []byte(i.String()), nil } - -// UnmarshalText implements the [encoding.TextUnmarshaler] interface. -func (i *StdMetrics) UnmarshalText(text []byte) error { - return enums.UnmarshalText(i, text, "StdMetrics") -} diff --git a/tensor/stats/metric/metric_test.go b/tensor/stats/metric/metric_test.go deleted file mode 100644 index 6cd11ac98d..0000000000 --- a/tensor/stats/metric/metric_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package metric - -import ( - "math" - "testing" - - "cogentcore.org/core/math32" -) - -func TestAll(t *testing.T) { - a64 := []float64{.5, .2, .1, .7, math.NaN(), .5} - b64 := []float64{.2, .5, .1, .7, 0, .2} - - a32 := []float32{.5, .2, .1, .7, math32.NaN(), .5} - b32 := []float32{.2, .5, .1, .7, 0, .2} - - ss := SumSquares64(a64, b64) - if ss != 0.27 { - t.Errorf("SumSquares64: %g\n", ss) - } - ss32 := SumSquares32(a32, b32) - if ss32 != float32(ss) { - t.Errorf("SumSquares32: %g\n", ss32) - } - - ec := Euclidean64(a64, b64) - if math.Abs(ec-math.Sqrt(0.27)) > 1.0e-10 { - t.Errorf("Euclidean64: %g vs. %g\n", ec, math.Sqrt(0.27)) - } - ec32 := Euclidean32(a32, b32) - if ec32 != float32(ec) { - t.Errorf("Euclidean32: %g\n", ec32) - } - - cv := Covariance64(a64, b64) - if cv != 0.023999999999999994 { - t.Errorf("Covariance64: %g\n", cv) - } - cv32 := Covariance32(a32, b32) - if cv32 != float32(cv) { - t.Errorf("Covariance32: %g\n", cv32) - } - - cr := Correlation64(a64, b64) - if cr != 0.47311118871909136 { - t.Errorf("Correlation64: %g\n", cr) - } - cr32 := Correlation32(a32, b32) - if cr32 != 0.47311115 { - t.Errorf("Correlation32: %g\n", cr32) - } - - cs := Cosine64(a64, b64) - if cs != 0.861061697819235 { - t.Errorf("Cosine64: %g\n", cs) - } - cs32 := Cosine32(a32, b32) - if cs32 != 0.86106175 { - t.Errorf("Cosine32: %g\n", cs32) - } - - ab := Abs64(a64, b64) - if ab != 0.8999999999999999 { - t.Errorf("Abs64: %g\n", ab) - } - ab32 := Abs32(a32, b32) - if ab32 != 0.90000004 { - t.Errorf("Abs32: %g\n", ab32) - } - - hm := Hamming64(a64, b64) - if hm != 3 { - t.Errorf("Hamming64: %g\n", hm) - } - hm32 := Hamming32(a32, b32) - if hm32 != 3 { - t.Errorf("Hamming32: %g\n", hm32) - } -} diff --git a/tensor/stats/metric/metrics.go b/tensor/stats/metric/metrics.go deleted file mode 100644 index 876be155c8..0000000000 --- a/tensor/stats/metric/metrics.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:generate core generate - -package metric - -// Func32 is a distance / similarity metric operating on slices of float32 numbers -type Func32 func(a, b []float32) float32 - -// Func64 is a distance / similarity metric operating on slices of float64 numbers -type Func64 func(a, b []float64) float64 - -// StdMetrics are standard metric functions -type StdMetrics int32 //enums:enum - -const ( - Euclidean StdMetrics = iota - SumSquares - Abs - Hamming - - EuclideanBinTol - SumSquaresBinTol - - // InvCosine is 1-Cosine -- useful to convert into an Increasing metric - InvCosine - - // InvCorrelation is 1-Correlation -- useful to convert into an Increasing metric - InvCorrelation - - CrossEntropy - - // Everything below here is !Increasing -- larger = closer, not farther - InnerProduct - Covariance - Correlation - Cosine -) - -// Increasing returns true if the distance metric is such that metric -// values increase as a function of distance (e.g., Euclidean) -// and false if metric values decrease as a function of distance -// (e.g., Cosine, Correlation) -func Increasing(std StdMetrics) bool { - if std >= InnerProduct { - return false - } - return true -} - -// StdFunc32 returns a standard metric function as specified -func StdFunc32(std StdMetrics) Func32 { - switch std { - case Euclidean: - return Euclidean32 - case SumSquares: - return SumSquares32 - case Abs: - return Abs32 - case Hamming: - return Hamming32 - case EuclideanBinTol: - return EuclideanBinTol32 - case SumSquaresBinTol: - return SumSquaresBinTol32 - case InvCorrelation: - return InvCorrelation32 - case InvCosine: - return InvCosine32 - case CrossEntropy: - return CrossEntropy32 - case InnerProduct: - return InnerProduct32 - case Covariance: - return Covariance32 - case Correlation: - return Correlation32 - case Cosine: - return Cosine32 - } - return nil -} - -// StdFunc64 returns a standard metric function as specified -func StdFunc64(std StdMetrics) Func64 { - switch std { - case Euclidean: - return Euclidean64 - case SumSquares: - return SumSquares64 - case Abs: - return Abs64 - case Hamming: - return Hamming64 - case EuclideanBinTol: - return EuclideanBinTol64 - case SumSquaresBinTol: - return SumSquaresBinTol64 - case InvCorrelation: - return InvCorrelation64 - case InvCosine: - return InvCosine64 - case CrossEntropy: - return CrossEntropy64 - case InnerProduct: - return InnerProduct64 - case Covariance: - return Covariance64 - case Correlation: - return Correlation64 - case Cosine: - return Cosine64 - } - return nil -} diff --git a/tensor/stats/metric/prob.go b/tensor/stats/metric/prob.go deleted file mode 100644 index 7749a1b1fd..0000000000 --- a/tensor/stats/metric/prob.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package metric - -import ( - "math" - - "cogentcore.org/core/math32" -) - -/////////////////////////////////////////// -// CrossEntropy - -// CrossEntropy32 computes cross-entropy between the two vectors. -// Skips NaN's and panics if lengths are not equal. 
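A brief sketch of how the StdMetrics enum and the shared Func64 signature defined above were meant to be used together; the vectors are made up:

package main

import (
	"fmt"

	"cogentcore.org/core/tensor/stats/metric"
)

// report runs any Func64 metric, illustrating how the shared signature lets
// callers take a metric as a parameter.
func report(name string, fun metric.Func64, a, b []float64) {
	fmt.Printf("%s: %g\n", name, fun(a, b))
}

func main() {
	a := []float64{1, 0, 0}
	b := []float64{0, 1, 0}
	std := metric.Euclidean
	fmt.Println("increasing:", metric.Increasing(std)) // true: larger value = farther apart
	report(std.String(), metric.StdFunc64(std), a, b)  // sqrt(2)
	report("Cosine", metric.Cosine64, a, b)            // 0: orthogonal vectors
}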
-func CrossEntropy32(a, b []float32) float32 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float32(0) - for i, av := range a { - bv := b[i] - if math32.IsNaN(av) || math32.IsNaN(bv) { - continue - } - bv = math32.Max(bv, 0.000001) - bv = math32.Min(bv, 0.999999) - if av >= 1.0 { - ss += -math32.Log(bv) - } else if av <= 0.0 { - ss += -math32.Log(1.0 - bv) - } else { - ss += av*math32.Log(av/bv) + (1-av)*math32.Log((1-av)/(1-bv)) - } - } - return ss -} - -// CrossEntropy64 computes the cross-entropy between the two vectors. -// Skips NaN's and panics if lengths are not equal. -func CrossEntropy64(a, b []float64) float64 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float64(0) - for i, av := range a { - bv := b[i] - if math.IsNaN(av) || math.IsNaN(bv) { - continue - } - bv = math.Max(bv, 0.000001) - bv = math.Min(bv, 0.999999) - if av >= 1.0 { - ss += -math.Log(bv) - } else if av <= 0.0 { - ss += -math.Log(1.0 - bv) - } else { - ss += av*math.Log(av/bv) + (1-av)*math.Log((1-av)/(1-bv)) - } - } - return ss -} diff --git a/tensor/stats/metric/squares.go b/tensor/stats/metric/squares.go deleted file mode 100644 index 61576ba56a..0000000000 --- a/tensor/stats/metric/squares.go +++ /dev/null @@ -1,606 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package metric - -import ( - "math" - - "cogentcore.org/core/math32" - "cogentcore.org/core/tensor/stats/stats" -) - -/////////////////////////////////////////// -// SumSquares - -// SumSquares32 computes the sum-of-squares distance between two vectors. -// Skips NaN's and panics if lengths are not equal. -// Uses optimized algorithm from BLAS that avoids numerical overflow. -func SumSquares32(a, b []float32) float32 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - n := len(a) - if n < 2 { - if n == 1 { - return math32.Abs(a[0] - b[0]) - } - return 0 - } - var ( - scale float32 = 0 - sumSquares float32 = 1 - ) - for i, av := range a { - bv := b[i] - if av == bv || math32.IsNaN(av) || math32.IsNaN(bv) { - continue - } - absxi := math32.Abs(av - bv) - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - if math32.IsInf(scale, 1) { - return math32.Inf(1) - } - return scale * scale * sumSquares -} - -// SumSquares64 computes the sum-of-squares distance between two vectors. -// Skips NaN's and panics if lengths are not equal. -// Uses optimized algorithm from BLAS that avoids numerical overflow. -func SumSquares64(a, b []float64) float64 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - n := len(a) - if n < 2 { - if n == 1 { - return math.Abs(a[0] - b[0]) - } - return 0 - } - var ( - scale float64 = 0 - sumSquares float64 = 1 - ) - for i, av := range a { - bv := b[i] - if av == bv || math.IsNaN(av) || math.IsNaN(bv) { - continue - } - absxi := math.Abs(av - bv) - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * scale * sumSquares -} - -/////////////////////////////////////////// -// SumSquaresBinTol - -// SumSquaresBinTol32 computes the sum-of-squares distance between two vectors. 
-// Skips NaN's and panics if lengths are not equal. -// Uses optimized algorithm from BLAS that avoids numerical overflow. -// BinTol version uses binary tolerance for 0-1 valued-vectors where -// abs diff < .5 counts as 0 error (i.e., closer than not). -func SumSquaresBinTol32(a, b []float32) float32 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - n := len(a) - if n < 2 { - if n == 1 { - return math32.Abs(a[0] - b[0]) - } - return 0 - } - var ( - scale float32 = 0 - sumSquares float32 = 1 - ) - for i, av := range a { - bv := b[i] - if av == bv || math32.IsNaN(av) || math32.IsNaN(bv) { - continue - } - absxi := math32.Abs(av - bv) - if absxi < 0.5 { - continue - } - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - if math32.IsInf(scale, 1) { - return math32.Inf(1) - } - return scale * scale * sumSquares -} - -// SumSquaresBinTol64 computes the sum-of-squares distance between two vectors. -// Skips NaN's and panics if lengths are not equal. -// Uses optimized algorithm from BLAS that avoids numerical overflow. -// BinTol version uses binary tolerance for 0-1 valued-vectors where -// abs diff < .5 counts as 0 error (i.e., closer than not). -func SumSquaresBinTol64(a, b []float64) float64 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - n := len(a) - if n < 2 { - if n == 1 { - return math.Abs(a[0] - b[0]) - } - return 0 - } - var ( - scale float64 = 0 - sumSquares float64 = 1 - ) - for i, av := range a { - bv := b[i] - if av == bv || math.IsNaN(av) || math.IsNaN(bv) { - continue - } - absxi := math.Abs(av - bv) - if absxi < 0.5 { - continue - } - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * scale * sumSquares -} - -/////////////////////////////////////////// -// Euclidean - -// Euclidean32 computes the square-root of sum-of-squares distance -// between two vectors (aka the L2 norm). -// Skips NaN's and panics if lengths are not equal. -// Uses optimized algorithm from BLAS that avoids numerical overflow. -func Euclidean32(a, b []float32) float32 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - n := len(a) - if n < 2 { - if n == 1 { - return math32.Abs(a[0] - b[0]) - } - return 0 - } - var ( - scale float32 = 0 - sumSquares float32 = 1 - ) - for i, av := range a { - bv := b[i] - if av == bv || math32.IsNaN(av) || math32.IsNaN(bv) { - continue - } - absxi := math32.Abs(av - bv) - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - if math32.IsInf(scale, 1) { - return math32.Inf(1) - } - return scale * math32.Sqrt(sumSquares) -} - -// Euclidean64 computes the square-root of sum-of-squares distance -// between two vectors (aka the L2 norm). -// Skips NaN's and panics if lengths are not equal. -// Uses optimized algorithm from BLAS that avoids numerical overflow. 
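A small sanity-check sketch of the relationship between the two distances defined above (Euclidean is the square root of the sum-of-squares distance), on made-up data:

package main

import (
	"fmt"

	"cogentcore.org/core/math32"
	"cogentcore.org/core/tensor/stats/metric"
)

func main() {
	a := []float32{0.5, 0.2, 0.1}
	b := []float32{0.2, 0.5, 0.1}
	ss := metric.SumSquares32(a, b)
	eu := metric.Euclidean32(a, b)
	// Euclidean distance equals sqrt(sum-of-squares) up to floating-point rounding.
	fmt.Println(ss, eu, math32.Abs(eu-math32.Sqrt(ss)) < 1e-6)
}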
-func Euclidean64(a, b []float64) float64 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - n := len(a) - if n < 2 { - if n == 1 { - return math.Abs(a[0] - b[0]) - } - return 0 - } - var ( - scale float64 = 0 - sumSquares float64 = 1 - ) - for i, av := range a { - bv := b[i] - if av == bv || math.IsNaN(av) || math.IsNaN(bv) { - continue - } - absxi := math.Abs(av - bv) - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * math.Sqrt(sumSquares) -} - -/////////////////////////////////////////// -// EuclideanBinTol - -// EuclideanBinTol32 computes the square-root of sum-of-squares distance -// between two vectors (aka the L2 norm). -// Skips NaN's and panics if lengths are not equal. -// Uses optimized algorithm from BLAS that avoids numerical overflow. -// BinTol version uses binary tolerance for 0-1 valued-vectors where -// abs diff < .5 counts as 0 error (i.e., closer than not). -func EuclideanBinTol32(a, b []float32) float32 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - n := len(a) - if n < 2 { - if n == 1 { - return math32.Abs(a[0] - b[0]) - } - return 0 - } - var ( - scale float32 = 0 - sumSquares float32 = 1 - ) - for i, av := range a { - bv := b[i] - if av == bv || math32.IsNaN(av) || math32.IsNaN(bv) { - continue - } - absxi := math32.Abs(av - bv) - if absxi < 0.5 { - continue - } - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - if math32.IsInf(scale, 1) { - return math32.Inf(1) - } - return scale * math32.Sqrt(sumSquares) -} - -// EuclideanBinTol64 computes the square-root of sum-of-squares distance -// between two vectors (aka the L2 norm). -// Skips NaN's and panics if lengths are not equal. -// Uses optimized algorithm from BLAS that avoids numerical overflow. -// BinTol version uses binary tolerance for 0-1 valued-vectors where -// abs diff < .5 counts as 0 error (i.e., closer than not). -func EuclideanBinTol64(a, b []float64) float64 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - n := len(a) - if n < 2 { - if n == 1 { - return math.Abs(a[0] - b[0]) - } - return 0 - } - var ( - scale float64 = 0 - sumSquares float64 = 1 - ) - for i, av := range a { - bv := b[i] - if av == bv || math.IsNaN(av) || math.IsNaN(bv) { - continue - } - absxi := math.Abs(av - bv) - if absxi < 0.5 { - continue - } - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * math.Sqrt(sumSquares) -} - -/////////////////////////////////////////// -// Covariance - -// Covariance32 computes the mean of the co-product of each vector element minus -// the mean of that vector: cov(A,B) = E[(A - E(A))(B - E(B))] -// Skips NaN's and panics if lengths are not equal. 
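A sketch of what the binary-tolerance variants above do on made-up 0-1 activity patterns: any element whose absolute difference is under 0.5 counts as zero error:

package main

import (
	"fmt"

	"cogentcore.org/core/tensor/stats/metric"
)

func main() {
	target := []float32{1, 0, 1, 0}
	output := []float32{0.8, 0.3, 0.4, 0.1} // only the third unit is on the wrong side of 0.5
	fmt.Println(metric.Euclidean32(target, output))       // ~0.71: counts every difference
	fmt.Println(metric.EuclideanBinTol32(target, output)) // ~0.6: only |1-0.4| exceeds the tolerance
}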
-func Covariance32(a, b []float32) float32 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float32(0) - am := stats.Mean32(a) - bm := stats.Mean32(b) - n := 0 - for i, av := range a { - bv := b[i] - if math32.IsNaN(av) || math32.IsNaN(bv) { - continue - } - ss += (av - am) * (bv - bm) - n++ - } - if n > 0 { - ss /= float32(n) - } - return ss -} - -// Covariance64 computes the mean of the co-product of each vector element minus -// the mean of that vector: cov(A,B) = E[(A - E(A))(B - E(B))] -// Skips NaN's and panics if lengths are not equal. -func Covariance64(a, b []float64) float64 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float64(0) - am := stats.Mean64(a) - bm := stats.Mean64(b) - n := 0 - for i, av := range a { - bv := b[i] - if math.IsNaN(av) || math.IsNaN(bv) { - continue - } - ss += (av - am) * (bv - bm) - n++ - } - if n > 0 { - ss /= float64(n) - } - return ss -} - -/////////////////////////////////////////// -// Correlation - -// Correlation32 computes the vector similarity in range (-1..1) as the -// mean of the co-product of each vector element minus the mean of that vector, -// normalized by the product of their standard deviations: -// cor(A,B) = E[(A - E(A))(B - E(B))] / sigma(A) sigma(B). -// (i.e., the standardized covariance) -- equivalent to the cosine of mean-normalized -// vectors. -// Skips NaN's and panics if lengths are not equal. -func Correlation32(a, b []float32) float32 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float32(0) - am := stats.Mean32(a) - bm := stats.Mean32(b) - var avar, bvar float32 - for i, av := range a { - bv := b[i] - if math32.IsNaN(av) || math32.IsNaN(bv) { - continue - } - ad := av - am - bd := bv - bm - ss += ad * bd // between - avar += ad * ad // within - bvar += bd * bd - } - vp := math32.Sqrt(avar * bvar) - if vp > 0 { - ss /= vp - } - return ss -} - -// Correlation64 computes the vector similarity in range (-1..1) as the -// mean of the co-product of each vector element minus the mean of that vector, -// normalized by the product of their standard deviations: -// cor(A,B) = E[(A - E(A))(B - E(B))] / sigma(A) sigma(B). -// (i.e., the standardized covariance) -- equivalent to the cosine of mean-normalized -// vectors. -// Skips NaN's and panics if lengths are not equal. -func Correlation64(a, b []float64) float64 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float64(0) - am := stats.Mean64(a) - bm := stats.Mean64(b) - var avar, bvar float64 - for i, av := range a { - bv := b[i] - if math.IsNaN(av) || math.IsNaN(bv) { - continue - } - ad := av - am - bd := bv - bm - ss += ad * bd // between - avar += ad * ad // within - bvar += bd * bd - } - vp := math.Sqrt(avar * bvar) - if vp > 0 { - ss /= vp - } - return ss -} - -/////////////////////////////////////////// -// InnerProduct - -// InnerProduct32 computes the sum of the element-wise product of the two vectors. -// Skips NaN's and panics if lengths are not equal. 
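A short sketch of the difference between the two metrics above: covariance depends on the scale of the inputs, while correlation is standardized. The data is made up:

package main

import (
	"fmt"

	"cogentcore.org/core/tensor/stats/metric"
)

func main() {
	a := []float64{1, 2, 3, 4}
	b := []float64{10, 20, 30, 40} // b is just a rescaled by 10
	fmt.Println(metric.Covariance64(a, b))  // 12.5: scale-dependent
	fmt.Println(metric.Correlation64(a, b)) // 1: perfectly correlated regardless of scale
}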
-func InnerProduct32(a, b []float32) float32 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float32(0) - for i, av := range a { - bv := b[i] - if math32.IsNaN(av) || math32.IsNaN(bv) { - continue - } - ss += av * bv - } - return ss -} - -// InnerProduct64 computes the mean of the co-product of each vector element minus -// the mean of that vector, normalized by the product of their standard deviations: -// cor(A,B) = E[(A - E(A))(B - E(B))] / sigma(A) sigma(B). -// (i.e., the standardized covariance) -- equivalent to the cosine of mean-normalized -// vectors. -// Skips NaN's and panics if lengths are not equal. -func InnerProduct64(a, b []float64) float64 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float64(0) - for i, av := range a { - bv := b[i] - if math.IsNaN(av) || math.IsNaN(bv) { - continue - } - ss += av * bv - } - return ss -} - -/////////////////////////////////////////// -// Cosine - -// Cosine32 computes the cosine of the angle between two vectors (-1..1), -// as the normalized inner product: inner product / sqrt(ssA * ssB). -// If vectors are mean-normalized = Correlation. -// Skips NaN's and panics if lengths are not equal. -func Cosine32(a, b []float32) float32 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float32(0) - var ass, bss float32 - for i, av := range a { - bv := b[i] - if math32.IsNaN(av) || math32.IsNaN(bv) { - continue - } - ss += av * bv // between - ass += av * av // within - bss += bv * bv - } - vp := math32.Sqrt(ass * bss) - if vp > 0 { - ss /= vp - } - return ss -} - -// Cosine32 computes the cosine of the angle between two vectors (-1..1), -// as the normalized inner product: inner product / sqrt(ssA * ssB). -// If vectors are mean-normalized = Correlation. -// Skips NaN's and panics if lengths are not equal. -func Cosine64(a, b []float64) float64 { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - ss := float64(0) - var ass, bss float64 - for i, av := range a { - bv := b[i] - if math.IsNaN(av) || math.IsNaN(bv) { - continue - } - ss += av * bv // between - ass += av * av // within - bss += bv * bv - } - vp := math.Sqrt(ass * bss) - if vp > 0 { - ss /= vp - } - return ss -} - -/////////////////////////////////////////// -// InvCosine - -// InvCosine32 computes 1 - cosine of the angle between two vectors (-1..1), -// as the normalized inner product: inner product / sqrt(ssA * ssB). -// If vectors are mean-normalized = Correlation. -// Skips NaN's and panics if lengths are not equal. -func InvCosine32(a, b []float32) float32 { - return 1 - Cosine32(a, b) -} - -// InvCosine32 computes 1 - cosine of the angle between two vectors (-1..1), -// as the normalized inner product: inner product / sqrt(ssA * ssB). -// If vectors are mean-normalized = Correlation. -// Skips NaN's and panics if lengths are not equal. -func InvCosine64(a, b []float64) float64 { - return 1 - Cosine64(a, b) -} - -/////////////////////////////////////////// -// InvCorrelation - -// InvCorrelation32 computes 1 - the vector similarity in range (-1..1) as the -// mean of the co-product of each vector element minus the mean of that vector, -// normalized by the product of their standard deviations: -// cor(A,B) = E[(A - E(A))(B - E(B))] / sigma(A) sigma(B). -// (i.e., the standardized covariance) -- equivalent to the cosine of mean-normalized -// vectors. -// Skips NaN's and panics if lengths are not equal. 
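The doc comments above describe correlation as the cosine of mean-normalized vectors; a small sketch checking that equivalence on made-up data:

package main

import (
	"fmt"

	"cogentcore.org/core/tensor/stats/metric"
	"cogentcore.org/core/tensor/stats/stats"
)

func main() {
	a := []float64{0.5, 0.2, 0.1, 0.7}
	b := []float64{0.2, 0.5, 0.1, 0.7}
	fmt.Println(metric.Correlation64(a, b))

	// Subtract each vector's mean, then take the cosine: same value up to rounding.
	am, bm := stats.Mean64(a), stats.Mean64(b)
	an := make([]float64, len(a))
	bn := make([]float64, len(b))
	for i := range a {
		an[i] = a[i] - am
		bn[i] = b[i] - bm
	}
	fmt.Println(metric.Cosine64(an, bn))
}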
-func InvCorrelation32(a, b []float32) float32 { - return 1 - Correlation32(a, b) -} - -// InvCorrelation64 computes 1 - the vector similarity in range (-1..1) as the -// mean of the co-product of each vector element minus the mean of that vector, -// normalized by the product of their standard deviations: -// cor(A,B) = E[(A - E(A))(B - E(B))] / sigma(A) sigma(B). -// (i.e., the standardized covariance) -- equivalent to the cosine of mean-normalized -// vectors. -// Skips NaN's and panics if lengths are not equal. -func InvCorrelation64(a, b []float64) float64 { - return 1 - Correlation64(a, b) -} diff --git a/tensor/stats/metric/tensor.go b/tensor/stats/metric/tensor.go deleted file mode 100644 index 6d9ae20455..0000000000 --- a/tensor/stats/metric/tensor.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package metric - -import ( - "math" - - "cogentcore.org/core/tensor" -) - -// ClosestRow32 returns the closest fit between probe pattern and patterns in -// an tensor with float32 data where the outer-most dimension is assumed to be a row -// (e.g., as a column in an table), using the given metric function, -// *which must have the Increasing property* -- i.e., larger = further. -// returns the row and metric value for that row. -// Col cell sizes must match size of probe (panics if not). -func ClosestRow32(probe tensor.Tensor, col tensor.Tensor, mfun Func32) (int, float32) { - pr := probe.(*tensor.Float32) - cl := col.(*tensor.Float32) - rows := col.Shape().DimSize(0) - csz := col.Len() / rows - if csz != probe.Len() { - panic("metric.ClosestRow32: probe size != cell size of tensor column!\n") - } - ci := -1 - minv := float32(math.MaxFloat32) - for ri := 0; ri < rows; ri++ { - st := ri * csz - rvals := cl.Values[st : st+csz] - v := mfun(pr.Values, rvals) - if v < minv { - ci = ri - minv = v - } - } - return ci, minv -} - -// ClosestRow64 returns the closest fit between probe pattern and patterns in -// a tensor with float64 data where the outer-most dimension is assumed to be a row -// (e.g., as a column in an table), using the given metric function, -// *which must have the Increasing property* -- i.e., larger = further. -// returns the row and metric value for that row. -// Col cell sizes must match size of probe (panics if not). -func ClosestRow64(probe tensor.Tensor, col tensor.Tensor, mfun Func64) (int, float64) { - pr := probe.(*tensor.Float64) - cl := col.(*tensor.Float64) - rows := col.DimSize(0) - csz := col.Len() / rows - if csz != probe.Len() { - panic("metric.ClosestRow64: probe size != cell size of tensor column!\n") - } - ci := -1 - minv := math.MaxFloat64 - for ri := 0; ri < rows; ri++ { - st := ri * csz - rvals := cl.Values[st : st+csz] - v := mfun(pr.Values, rvals) - if v < minv { - ci = ri - minv = v - } - } - return ci, minv -} - -// ClosestRow32Py returns the closest fit between probe pattern and patterns in -// an tensor.Float32 where the outer-most dimension is assumed to be a row -// (e.g., as a column in an table), using the given metric function, -// *which must have the Increasing property* -- i.e., larger = further. -// returns the row and metric value for that row. -// Col cell sizes must match size of probe (panics if not). 
-// Py version is for Python, returns a slice with row, cor, takes std metric -func ClosestRow32Py(probe tensor.Tensor, col tensor.Tensor, std StdMetrics) []float32 { - row, cor := ClosestRow32(probe, col, StdFunc32(std)) - return []float32{float32(row), cor} -} - -// ClosestRow64Py returns the closest fit between probe pattern and patterns in -// an tensor.Tensor where the outer-most dimension is assumed to be a row -// (e.g., as a column in an table), using the given metric function, -// *which must have the Increasing property* -- i.e., larger = further. -// returns the row and metric value for that row. -// Col cell sizes must match size of probe (panics if not). -// Optimized for tensor.Float64 but works for any tensor. -// Py version is for Python, returns a slice with row, cor, takes std metric -func ClosestRow64Py(probe tensor.Tensor, col tensor.Tensor, std StdMetrics) []float64 { - row, cor := ClosestRow64(probe, col, StdFunc64(std)) - return []float64{float64(row), cor} -} diff --git a/tensor/stats/metric/tol.go b/tensor/stats/metric/tol.go deleted file mode 100644 index a99058561f..0000000000 --- a/tensor/stats/metric/tol.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package metric - -import ( - "math" - - "cogentcore.org/core/math32" -) - -/////////////////////////////////////////// -// Tolerance - -// Tolerance32 sets a = b for any element where |a-b| <= tol. -// This can be called prior to any metric function. -func Tolerance32(a, b []float32, tol float32) { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - for i, av := range a { - bv := b[i] - if math32.IsNaN(av) || math32.IsNaN(bv) { - continue - } - if math32.Abs(av-bv) <= tol { - a[i] = bv - } - } -} - -// Tolerance64 sets a = b for any element where |a-b| <= tol. -// This can be called prior to any metric function. -func Tolerance64(a, b []float64, tol float64) { - if len(a) != len(b) { - panic("metric: slice lengths do not match") - } - for i, av := range a { - bv := b[i] - if math.IsNaN(av) || math.IsNaN(bv) { - continue - } - if math.Abs(av-bv) <= tol { - a[i] = bv - } - } -} diff --git a/tensor/stats/norm/README.md b/tensor/stats/norm/README.md deleted file mode 100644 index ca41bdf01f..0000000000 --- a/tensor/stats/norm/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# norm - -`norm` provides normalization of vector and tensor values. The basic functions operate on either `[]float32` or `[]float64` data, with Tensor versions using those, only for Float32 and Float64 tensors. - -* DivNorm does divisive normalization of elements -* SubNorm does subtractive normalization of elements -* ZScore subtracts the mean and divides by the standard deviation -* Abs performs absolute-value on all elements (e.g., use prior to [stats](../stats) to produce Mean of Abs vals etc). - - diff --git a/tensor/stats/norm/abs.go b/tensor/stats/norm/abs.go deleted file mode 100644 index 699152382d..0000000000 --- a/tensor/stats/norm/abs.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
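A small sketch of finding the best-matching row with the removed ClosestRow64, using an Increasing metric as the doc comment requires; the stored patterns and probe are made up:

package main

import (
	"fmt"

	"cogentcore.org/core/tensor"
	"cogentcore.org/core/tensor/stats/metric"
)

func main() {
	// Three stored 4-element patterns (rows) and one probe pattern.
	col := tensor.New[float64]([]int{3, 4}).(*tensor.Float64)
	copy(col.Values, []float64{
		1, 0, 0, 0,
		0, 1, 0, 0,
		0, 0, 1, 1,
	})
	probe := tensor.New[float64]([]int{4}).(*tensor.Float64)
	copy(probe.Values, []float64{0, 0, 1, 0.8})

	row, dist := metric.ClosestRow64(probe, col, metric.Euclidean64)
	fmt.Println(row, dist) // 2 ~0.2: the third row is the best match
}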
- -package norm - -import ( - "fmt" - "log/slog" - "math" - - "cogentcore.org/core/math32" - "cogentcore.org/core/tensor" -) - -// Abs32 applies the Abs function to each element in given slice -func Abs32(a []float32) { - for i, av := range a { - if math32.IsNaN(av) { - continue - } - a[i] = math32.Abs(av) - } -} - -// Abs64 applies the Abs function to each element in given slice -func Abs64(a []float64) { - for i, av := range a { - if math.IsNaN(av) { - continue - } - a[i] = math.Abs(av) - } -} - -func FloatOnlyError() error { - err := fmt.Errorf("Only float32 or float64 data types supported") - slog.Error(err.Error()) - return err -} - -// AbsTensor applies the Abs function to each element in given tensor, -// for float32 and float64 data types. -func AbsTensor(a tensor.Tensor) { - switch tsr := a.(type) { - case *tensor.Float32: - Abs32(tsr.Values) - case *tensor.Float64: - Abs64(tsr.Values) - default: - FloatOnlyError() - } -} diff --git a/tensor/stats/norm/doc.go b/tensor/stats/norm/doc.go deleted file mode 100644 index c3b769433e..0000000000 --- a/tensor/stats/norm/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package norm provides normalization and norm metric computations -e.g., L2 = sqrt of sum of squares of a vector. - -DivNorm does divisive normalization of elements -SubNorm does subtractive normalization of elements -ZScore subtracts the mean and divides by the standard deviation -*/ -package norm diff --git a/tensor/stats/norm/norm.go b/tensor/stats/norm/norm.go deleted file mode 100644 index 1090e2fa53..0000000000 --- a/tensor/stats/norm/norm.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package norm - -//go:generate core generate - -import ( - "math" - - "cogentcore.org/core/math32" - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/stats/stats" -) - -// FloatFunc applies given functions to float tensor data, which is either Float32 or Float64 -func FloatFunc(tsr tensor.Tensor, nfunc32 Func32, nfunc64 Func64, stIdx, nIdx int, ffunc32 func(a []float32, fun Func32), ffunc64 func(a []float64, fun Func64)) { - switch tt := tsr.(type) { - case *tensor.Float32: - vals := tt.Values - if nIdx > 0 { - vals = vals[stIdx : stIdx+nIdx] - } - ffunc32(vals, nfunc32) - case *tensor.Float64: - vals := tt.Values - if nIdx > 0 { - vals = vals[stIdx : stIdx+nIdx] - } - ffunc64(vals, nfunc64) - default: - FloatOnlyError() - } -} - -/////////////////////////////////////////// -// DivNorm - -// DivNorm32 does divisive normalization by given norm function -// i.e., it divides each element by the norm value computed from nfunc. -func DivNorm32(a []float32, nfunc Func32) { - nv := nfunc(a) - if nv != 0 { - MultVector32(a, 1/nv) - } -} - -// DivNorm64 does divisive normalization by given norm function -// i.e., it divides each element by the norm value computed from nfunc. -func DivNorm64(a []float64, nfunc Func64) { - nv := nfunc(a) - if nv != 0 { - MultVec64(a, 1/nv) - } -} - -/////////////////////////////////////////// -// SubNorm - -// SubNorm32 does subtractive normalization by given norm function -// i.e., it subtracts norm computed by given function from each element. 
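A tiny sketch of divisive normalization as defined above, dividing each element by a norm computed from the slice (here the maximum); the values are made up:

package main

import (
	"fmt"

	"cogentcore.org/core/tensor/stats/norm"
	"cogentcore.org/core/tensor/stats/stats"
)

func main() {
	v := []float64{1, 2, 4}
	// Divide every element by the norm returned by the given function, here the max.
	norm.DivNorm64(v, stats.Max64)
	fmt.Println(v) // [0.25 0.5 1]
}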
-func SubNorm32(a []float32, nfunc Func32) { - nv := nfunc(a) - AddVector32(a, -nv) -} - -// SubNorm64 does subtractive normalization by given norm function -// i.e., it subtracts norm computed by given function from each element. -func SubNorm64(a []float64, nfunc Func64) { - nv := nfunc(a) - AddVec64(a, -nv) -} - -/////////////////////////////////////////// -// ZScore - -// ZScore32 subtracts the mean and divides by the standard deviation -func ZScore32(a []float32) { - SubNorm32(a, stats.Mean32) - DivNorm32(a, stats.Std32) -} - -// ZScore64 subtracts the mean and divides by the standard deviation -func ZScore64(a []float64) { - SubNorm64(a, stats.Mean64) - DivNorm64(a, stats.Std64) -} - -/////////////////////////////////////////// -// Unit - -// Unit32 subtracts the min and divides by the max, so that values are in 0-1 unit range -func Unit32(a []float32) { - SubNorm32(a, stats.Min32) - DivNorm32(a, stats.Max32) -} - -// Unit64 subtracts the min and divides by the max, so that values are in 0-1 unit range -func Unit64(a []float64) { - SubNorm64(a, stats.Min64) - DivNorm64(a, stats.Max64) -} - -/////////////////////////////////////////// -// MultVec - -// MultVector32 multiplies vector elements by scalar -func MultVector32(a []float32, val float32) { - for i, av := range a { - if math32.IsNaN(av) { - continue - } - a[i] *= val - } -} - -// MultVec64 multiplies vector elements by scalar -func MultVec64(a []float64, val float64) { - for i, av := range a { - if math.IsNaN(av) { - continue - } - a[i] *= val - } -} - -/////////////////////////////////////////// -// AddVec - -// AddVector32 adds scalar to vector -func AddVector32(a []float32, val float32) { - for i, av := range a { - if math32.IsNaN(av) { - continue - } - a[i] += val - } -} - -// AddVec64 adds scalar to vector -func AddVec64(a []float64, val float64) { - for i, av := range a { - if math.IsNaN(av) { - continue - } - a[i] += val - } -} - -/////////////////////////////////////////// -// Thresh - -// Thresh32 thresholds the values of the vector -- anything above the high threshold is set -// to the high value, and everything below the low threshold is set to the low value. -func Thresh32(a []float32, hi bool, hiThr float32, lo bool, loThr float32) { - for i, av := range a { - if math32.IsNaN(av) { - continue - } - if hi && av > hiThr { - a[i] = hiThr - } - if lo && av < loThr { - a[i] = loThr - } - } -} - -// Thresh64 thresholds the values of the vector -- anything above the high threshold is set -// to the high value, and everything below the low threshold is set to the low value. -func Thresh64(a []float64, hi bool, hiThr float64, lo bool, loThr float64) { - for i, av := range a { - if math.IsNaN(av) { - continue - } - if hi && av > hiThr { - a[i] = hiThr - } - if lo && av < loThr { - a[i] = loThr - } - } -} - -/////////////////////////////////////////// -// Binarize - -// Binarize32 turns vector into binary-valued, by setting anything >= the threshold -// to the high value, and everything below to the low value. -func Binarize32(a []float32, thr, hiVal, loVal float32) { - for i, av := range a { - if math32.IsNaN(av) { - continue - } - if av >= thr { - a[i] = hiVal - } else { - a[i] = loVal - } - } -} - -// Binarize64 turns vector into binary-valued, by setting anything >= the threshold -// to the high value, and everything below to the low value. 
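The removed test below only exercises Unit on data already in the 0-1 range; a sketch with an arbitrary (made-up) range shows the intended mapping of min to 0 and max to 1:

package main

import (
	"fmt"

	"cogentcore.org/core/tensor/stats/norm"
)

func main() {
	v := []float64{-2, 0, 2, 6}
	// Unit64 subtracts the min in place, then divides by the max of the shifted
	// values, i.e., by the original range (max - min).
	norm.Unit64(v)
	fmt.Println(v) // [0 0.25 0.5 1]
}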
-func Binarize64(a []float64, thr, hiVal, loVal float64) { - for i, av := range a { - if math.IsNaN(av) { - continue - } - if av >= thr { - a[i] = hiVal - } else { - a[i] = loVal - } - } -} - -// Func32 is a norm function operating on slice of float32 numbers -type Func32 func(a []float32) float32 - -// Func64 is a norm function operating on slices of float64 numbers -type Func64 func(a []float64) float64 diff --git a/tensor/stats/norm/norm_test.go b/tensor/stats/norm/norm_test.go deleted file mode 100644 index b9b420e8de..0000000000 --- a/tensor/stats/norm/norm_test.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package norm - -import ( - "testing" - - "cogentcore.org/core/base/tolassert" - "cogentcore.org/core/tensor" - "github.com/stretchr/testify/assert" -) - -func TestNorm32(t *testing.T) { - vals := []float32{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1} - - zn := []float32{-1.5075567, -1.2060454, -0.90453404, -0.60302263, -0.30151132, 0, 0.3015114, 0.60302263, 0.90453404, 1.2060453, 1.5075567} - nvals := make([]float32, len(vals)) - copy(nvals, vals) - ZScore32(nvals) - assert.Equal(t, zn, nvals) - - copy(nvals, vals) - Unit32(nvals) - assert.Equal(t, vals, nvals) - - tn := []float32{0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.8, 0.8} - copy(nvals, vals) - Thresh32(nvals, true, 0.8, true, 0.2) - assert.Equal(t, tn, nvals) - - bn := []float32{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1} - copy(nvals, vals) - Binarize32(nvals, 0.5, 1.0, 0.0) - assert.Equal(t, bn, nvals) - - tsr := tensor.New[float32]([]int{11}).(*tensor.Float32) - copy(tsr.Values, vals) - TensorZScore(tsr, 0) - tolassert.EqualTolSlice(t, zn, tsr.Values, 1.0e-6) - - copy(tsr.Values, vals) - TensorUnit(tsr, 0) - tolassert.EqualTolSlice(t, vals, tsr.Values, 1.0e-6) - -} - -func TestNorm64(t *testing.T) { - vals := []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1} - - zn := []float64{-1.507556722888818, -1.2060453783110545, -0.9045340337332908, -0.6030226891555273, -0.3015113445777635, 0, 0.3015113445777635, 0.603022689155527, 0.904534033733291, 1.2060453783110545, 1.507556722888818} - nvals := make([]float64, len(vals)) - copy(nvals, vals) - ZScore64(nvals) - assert.Equal(t, zn, nvals) - - copy(nvals, vals) - Unit64(nvals) - assert.Equal(t, vals, nvals) - - tn := []float64{0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.8, 0.8} - copy(nvals, vals) - Thresh64(nvals, true, 0.8, true, 0.2) - assert.Equal(t, tn, nvals) - - bn := []float64{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1} - copy(nvals, vals) - Binarize64(nvals, 0.5, 1.0, 0.0) - assert.Equal(t, bn, nvals) - - tsr := tensor.New[float64]([]int{11}).(*tensor.Float64) - copy(tsr.Values, vals) - TensorZScore(tsr, 0) - tolassert.EqualTolSlice(t, zn, tsr.Values, 1.0e-6) - - copy(tsr.Values, vals) - TensorUnit(tsr, 0) - tolassert.EqualTolSlice(t, vals, tsr.Values, 1.0e-6) - -} diff --git a/tensor/stats/norm/tensor.go b/tensor/stats/norm/tensor.go deleted file mode 100644 index ffc00db8f7..0000000000 --- a/tensor/stats/norm/tensor.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package norm - -import ( - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/stats/stats" -) - -/////////////////////////////////////////// -// DivNorm - -// TensorDivNorm does divisive normalization by given norm function -// computed on the first ndim dims of the tensor, where 0 = all values, -// 1 = norm each of the sub-dimensions under the first outer-most dimension etc. -// ndim must be < NumDims() if not 0. -func TensorDivNorm(tsr tensor.Tensor, ndim int, nfunc32 Func32, nfunc64 Func64) { - if ndim == 0 { - FloatFunc(tsr, nfunc32, nfunc64, 0, 0, DivNorm32, DivNorm64) - } - if ndim >= tsr.NumDims() { - panic("norm.TensorSubNorm32: number of dims must be < NumDims()") - } - sln := 1 - ln := tsr.Len() - for i := 0; i < ndim; i++ { - sln *= tsr.Shape().DimSize(i) - } - dln := ln / sln - for sl := 0; sl < sln; sl++ { - st := sl * dln - FloatFunc(tsr, nfunc32, nfunc64, st, dln, DivNorm32, DivNorm64) - } -} - -/////////////////////////////////////////// -// SubNorm - -// TensorSubNorm does subtractive normalization by given norm function -// computed on the first ndim dims of the tensor, where 0 = all values, -// 1 = norm each of the sub-dimensions under the first outer-most dimension etc. -// ndim must be < NumDims() if not 0 (panics). -func TensorSubNorm(tsr tensor.Tensor, ndim int, nfunc32 Func32, nfunc64 Func64) { - if ndim == 0 { - FloatFunc(tsr, nfunc32, nfunc64, 0, 0, SubNorm32, SubNorm64) - } - if ndim >= tsr.NumDims() { - panic("norm.TensorSubNorm32: number of dims must be < NumDims()") - } - sln := 1 - ln := tsr.Len() - for i := 0; i < ndim; i++ { - sln *= tsr.Shape().DimSize(i) - } - dln := ln / sln - for sl := 0; sl < sln; sl++ { - st := sl * dln - FloatFunc(tsr, nfunc32, nfunc64, st, dln, SubNorm32, SubNorm64) - } -} - -// TensorZScore subtracts the mean and divides by the standard deviation -// computed on the first ndim dims of the tensor, where 0 = all values, -// 1 = norm each of the sub-dimensions under the first outer-most dimension etc. -// ndim must be < NumDims() if not 0 (panics). -// must be a float32 or float64 tensor -func TensorZScore(tsr tensor.Tensor, ndim int) { - TensorSubNorm(tsr, ndim, stats.Mean32, stats.Mean64) - TensorDivNorm(tsr, ndim, stats.Std32, stats.Std64) -} - -// TensorUnit subtracts the min and divides by the max, so that values are in 0-1 unit range -// computed on the first ndim dims of the tensor, where 0 = all values, -// 1 = norm each of the sub-dimensions under the first outer-most dimension etc. -// ndim must be < NumDims() if not 0 (panics). -// must be a float32 or float64 tensor -func TensorUnit(tsr tensor.Tensor, ndim int) { - TensorSubNorm(tsr, ndim, stats.Min32, stats.Min64) - TensorDivNorm(tsr, ndim, stats.Max32, stats.Max64) -} diff --git a/tensor/stats/pca/README.md b/tensor/stats/pca/README.md deleted file mode 100644 index 5f3f7b4d63..0000000000 --- a/tensor/stats/pca/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# pca - -This performs principal component's analysis and associated covariance matrix computations, operating on `table.Table` or `tensor.Tensor` data, using the [gonum](https://github.com/gonum/gonum) matrix interface. - -There is support for the SVD version, which is much faster and produces the same results, with options for how much information to compute trading off with compute time. - - diff --git a/tensor/stats/pca/covar.go b/tensor/stats/pca/covar.go deleted file mode 100644 index 5aa1e9f0d0..0000000000 --- a/tensor/stats/pca/covar.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright (c) 2024, Cogent Core. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pca - -import ( - "fmt" - - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/stats/metric" - "cogentcore.org/core/tensor/table" -) - -// CovarTableColumn generates a covariance matrix from given column name -// in given IndexView of an table.Table, and given metric function -// (typically Covariance or Correlation -- use Covar if vars have similar -// overall scaling, which is typical in neural network models, and use -// Correl if they are on very different scales -- Correl effectively rescales). -// A Covariance matrix computes the *row-wise* vector similarities for each -// pairwise combination of column cells -- i.e., the extent to which each -// cell co-varies in its value with each other cell across the rows of the table. -// This is the input to the PCA eigenvalue decomposition of the resulting -// covariance matrix. -func CovarTableColumn(cmat tensor.Tensor, ix *table.IndexView, column string, mfun metric.Func64) error { - col, err := ix.Table.ColumnByName(column) - if err != nil { - return err - } - rows := ix.Len() - nd := col.NumDims() - if nd < 2 || rows == 0 { - return fmt.Errorf("pca.CovarTableColumn: must have 2 or more dims and rows != 0") - } - ln := col.Len() - sz := ln / col.DimSize(0) // size of cell - - cshp := []int{sz, sz} - cmat.SetShape(cshp) - - av := make([]float64, rows) - bv := make([]float64, rows) - sdim := []int{0, 0} - for ai := 0; ai < sz; ai++ { - sdim[0] = ai - TableColumnRowsVec(av, ix, col, ai) - for bi := 0; bi <= ai; bi++ { // lower diag - sdim[1] = bi - TableColumnRowsVec(bv, ix, col, bi) - cv := mfun(av, bv) - cmat.SetFloat(sdim, cv) - } - } - // now fill in upper diagonal with values from lower diagonal - // note: assumes symmetric distance function - fdim := []int{0, 0} - for ai := 0; ai < sz; ai++ { - sdim[0] = ai - fdim[1] = ai - for bi := ai + 1; bi < sz; bi++ { // upper diag - fdim[0] = bi - sdim[1] = bi - cv := cmat.Float(fdim) - cmat.SetFloat(sdim, cv) - } - } - - if nm, has := ix.Table.MetaData["name"]; has { - cmat.SetMetaData("name", nm+"_"+column) - } else { - cmat.SetMetaData("name", column) - } - if ds, has := ix.Table.MetaData["desc"]; has { - cmat.SetMetaData("desc", ds) - } - return nil -} - -// CovarTensor generates a covariance matrix from given tensor.Tensor, -// where the outer-most dimension is rows, and all other dimensions within that -// are covaried against each other, using given metric function -// (typically Covariance or Correlation -- use Covar if vars have similar -// overall scaling, which is typical in neural network models, and use -// Correl if they are on very different scales -- Correl effectively rescales). -// A Covariance matrix computes the *row-wise* vector similarities for each -// pairwise combination of column cells -- i.e., the extent to which each -// cell co-varies in its value with each other cell across the rows of the table. -// This is the input to the PCA eigenvalue decomposition of the resulting -// covariance matrix. 
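The CovarTableColumn comment above, and the CovarTensor one that follows, describe the same pattern: treat each cell of a column as a variable, gather its values across rows, apply a pairwise metric, fill the lower triangle of a square matrix, and mirror it under the assumption that the metric is symmetric. A rough standalone illustration of that pattern in plain Go, with the removed tensor/table machinery replaced by slices; every name here is hypothetical:

package main

import "fmt"

// covariance is an illustrative pairwise metric: mean-centered dot product / n.
func covariance(a, b []float64) float64 {
	n := float64(len(a))
	var ma, mb float64
	for i := range a {
		ma += a[i]
		mb += b[i]
	}
	ma /= n
	mb /= n
	sum := 0.0
	for i := range a {
		sum += (a[i] - ma) * (b[i] - mb)
	}
	return sum / n
}

// covarMatrix builds the sz x sz matrix whose (a, b) entry is the metric applied
// to the across-row value vectors of cells a and b. Only the lower triangle is
// computed; the upper triangle is mirrored, assuming a symmetric metric.
func covarMatrix(data [][]float64, metric func(a, b []float64) float64) [][]float64 {
	rows, sz := len(data), len(data[0])
	cellVec := func(ci int) []float64 { // values of one cell across all rows
		v := make([]float64, rows)
		for ri := range data {
			v[ri] = data[ri][ci]
		}
		return v
	}
	cm := make([][]float64, sz)
	for i := range cm {
		cm[i] = make([]float64, sz)
	}
	for ai := 0; ai < sz; ai++ {
		av := cellVec(ai)
		for bi := 0; bi <= ai; bi++ { // lower triangle
			cv := metric(av, cellVec(bi))
			cm[ai][bi], cm[bi][ai] = cv, cv // mirror
		}
	}
	return cm
}

func main() {
	data := [][]float64{{1, 2}, {2, 4}, {3, 6}} // 3 rows, 2 cells per row
	fmt.Println(covarMatrix(data, covariance))
}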
-func CovarTensor(cmat tensor.Tensor, tsr tensor.Tensor, mfun metric.Func64) error { - rows := tsr.DimSize(0) - nd := tsr.NumDims() - if nd < 2 || rows == 0 { - return fmt.Errorf("pca.CovarTensor: must have 2 or more dims and rows != 0") - } - ln := tsr.Len() - sz := ln / rows - - cshp := []int{sz, sz} - cmat.SetShape(cshp) - - av := make([]float64, rows) - bv := make([]float64, rows) - sdim := []int{0, 0} - for ai := 0; ai < sz; ai++ { - sdim[0] = ai - TensorRowsVec(av, tsr, ai) - for bi := 0; bi <= ai; bi++ { // lower diag - sdim[1] = bi - TensorRowsVec(bv, tsr, bi) - cv := mfun(av, bv) - cmat.SetFloat(sdim, cv) - } - } - // now fill in upper diagonal with values from lower diagonal - // note: assumes symmetric distance function - fdim := []int{0, 0} - for ai := 0; ai < sz; ai++ { - sdim[0] = ai - fdim[1] = ai - for bi := ai + 1; bi < sz; bi++ { // upper diag - fdim[0] = bi - sdim[1] = bi - cv := cmat.Float(fdim) - cmat.SetFloat(sdim, cv) - } - } - - if nm, has := tsr.MetaData("name"); has { - cmat.SetMetaData("name", nm+"Covar") - } else { - cmat.SetMetaData("name", "Covar") - } - if ds, has := tsr.MetaData("desc"); has { - cmat.SetMetaData("desc", ds) - } - return nil -} - -// TableColumnRowsVec extracts row-wise vector from given cell index into vec. -// vec must be of size ix.Len() -- number of rows -func TableColumnRowsVec(vec []float64, ix *table.IndexView, col tensor.Tensor, cidx int) { - rows := ix.Len() - ln := col.Len() - sz := ln / col.DimSize(0) // size of cell - for ri := 0; ri < rows; ri++ { - coff := ix.Indexes[ri]*sz + cidx - vec[ri] = col.Float1D(coff) - } -} - -// TensorRowsVec extracts row-wise vector from given cell index into vec. -// vec must be of size tsr.DimSize(0) -- number of rows -func TensorRowsVec(vec []float64, tsr tensor.Tensor, cidx int) { - rows := tsr.DimSize(0) - ln := tsr.Len() - sz := ln / rows - for ri := 0; ri < rows; ri++ { - coff := ri*sz + cidx - vec[ri] = tsr.Float1D(coff) - } -} - -// CovarTableColumnStd generates a covariance matrix from given column name -// in given IndexView of an table.Table, and given metric function -// (typically Covariance or Correlation -- use Covar if vars have similar -// overall scaling, which is typical in neural network models, and use -// Correl if they are on very different scales -- Correl effectively rescales). -// A Covariance matrix computes the *row-wise* vector similarities for each -// pairwise combination of column cells -- i.e., the extent to which each -// cell co-varies in its value with each other cell across the rows of the table. -// This is the input to the PCA eigenvalue decomposition of the resulting -// covariance matrix. -// This Std version is usable e.g., in Python where the func cannot be passed. -func CovarTableColumnStd(cmat tensor.Tensor, ix *table.IndexView, column string, met metric.StdMetrics) error { - return CovarTableColumn(cmat, ix, column, metric.StdFunc64(met)) -} - -// CovarTensorStd generates a covariance matrix from given tensor.Tensor, -// where the outer-most dimension is rows, and all other dimensions within that -// are covaried against each other, using given metric function -// (typically Covariance or Correlation -- use Covar if vars have similar -// overall scaling, which is typical in neural network models, and use -// Correl if they are on very different scales -- Correl effectively rescales). 
-// A Covariance matrix computes the *row-wise* vector similarities for each -// pairwise combination of column cells -- i.e., the extent to which each -// cell co-varies in its value with each other cell across the rows of the table. -// This is the input to the PCA eigenvalue decomposition of the resulting -// covariance matrix. -// This Std version is usable e.g., in Python where the func cannot be passed. -func CovarTensorStd(cmat tensor.Tensor, tsr tensor.Tensor, met metric.StdMetrics) error { - return CovarTensor(cmat, tsr, metric.StdFunc64(met)) -} diff --git a/tensor/stats/pca/doc.go b/tensor/stats/pca/doc.go deleted file mode 100644 index f1c43f22f3..0000000000 --- a/tensor/stats/pca/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pca performs principal component's analysis and associated covariance -matrix computations, operating on table.Table or tensor.Tensor data. -*/ -package pca diff --git a/tensor/stats/pca/pca.go b/tensor/stats/pca/pca.go deleted file mode 100644 index b69b57e1a3..0000000000 --- a/tensor/stats/pca/pca.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pca - -//go:generate core generate - -import ( - "fmt" - - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/stats/metric" - "cogentcore.org/core/tensor/table" - "gonum.org/v1/gonum/mat" -) - -// PCA computes the eigenvalue decomposition of a square similarity matrix, -// typically generated using the correlation metric. -type PCA struct { - - // the covariance matrix computed on original data, which is then eigen-factored - Covar tensor.Tensor `display:"no-inline"` - - // the eigenvectors, in same size as Covar - each eigenvector is a column in this 2D square matrix, ordered *lowest* to *highest* across the columns -- i.e., maximum eigenvector is the last column - Vectors tensor.Tensor `display:"no-inline"` - - // the eigenvalues, ordered *lowest* to *highest* - Values []float64 `display:"no-inline"` -} - -func (pa *PCA) Init() { - pa.Covar = &tensor.Float64{} - pa.Vectors = &tensor.Float64{} - pa.Values = nil -} - -// TableColumn is a convenience method that computes a covariance matrix -// on given column of table and then performs the PCA on the resulting matrix. -// If no error occurs, the results can be read out from Vectors and Values -// or used in Projection methods. -// mfun is metric function, typically Covariance or Correlation -- use Covar -// if vars have similar overall scaling, which is typical in neural network models, -// and use Correl if they are on very different scales -- Correl effectively rescales). -// A Covariance matrix computes the *row-wise* vector similarities for each -// pairwise combination of column cells -- i.e., the extent to which each -// cell co-varies in its value with each other cell across the rows of the table. -// This is the input to the PCA eigenvalue decomposition of the resulting -// covariance matrix, which extracts the eigenvectors as directions with maximal -// variance in this matrix. 
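The eigen decomposition that this feeds into is done in the deleted PCA() method with gonum's mat.EigenSym, and the "lowest to highest" ordering noted for Vectors and Values above reflects gonum returning eigenvalues in ascending order. A minimal sketch of that gonum call pattern, using made-up numbers rather than a real covariance matrix:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	// A small symmetric "covariance" matrix; the values are illustrative only.
	covar := mat.NewSymDense(2, []float64{
		2.0, 0.8,
		0.8, 1.0,
	})

	var eig mat.EigenSym
	if ok := eig.Factorize(covar, true); !ok { // true: also compute eigenvectors
		panic("EigenSym factorization failed")
	}

	// Eigenvalues come back in ascending order, so the principal component
	// corresponds to the last value and the last column of the vectors matrix.
	vals := eig.Values(nil)
	var vecs mat.Dense
	eig.VectorsTo(&vecs)

	fmt.Println("eigenvalues (lowest to highest):", vals)
	fmt.Printf("eigenvectors (columns):\n%v\n", mat.Formatted(&vecs))
}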
-func (pa *PCA) TableColumn(ix *table.IndexView, column string, mfun metric.Func64) error { - if pa.Covar == nil { - pa.Init() - } - err := CovarTableColumn(pa.Covar, ix, column, mfun) - if err != nil { - return err - } - return pa.PCA() -} - -// Tensor is a convenience method that computes a covariance matrix -// on given tensor and then performs the PCA on the resulting matrix. -// If no error occurs, the results can be read out from Vectors and Values -// or used in Projection methods. -// mfun is metric function, typically Covariance or Correlation -- use Covar -// if vars have similar overall scaling, which is typical in neural network models, -// and use Correl if they are on very different scales -- Correl effectively rescales). -// A Covariance matrix computes the *row-wise* vector similarities for each -// pairwise combination of column cells -- i.e., the extent to which each -// cell co-varies in its value with each other cell across the rows of the table. -// This is the input to the PCA eigenvalue decomposition of the resulting -// covariance matrix, which extracts the eigenvectors as directions with maximal -// variance in this matrix. -func (pa *PCA) Tensor(tsr tensor.Tensor, mfun metric.Func64) error { - if pa.Covar == nil { - pa.Init() - } - err := CovarTensor(pa.Covar, tsr, mfun) - if err != nil { - return err - } - return pa.PCA() -} - -// TableColumnStd is a convenience method that computes a covariance matrix -// on given column of table and then performs the PCA on the resulting matrix. -// If no error occurs, the results can be read out from Vectors and Values -// or used in Projection methods. -// mfun is a Std metric function, typically Covariance or Correlation -- use Covar -// if vars have similar overall scaling, which is typical in neural network models, -// and use Correl if they are on very different scales -- Correl effectively rescales). -// A Covariance matrix computes the *row-wise* vector similarities for each -// pairwise combination of column cells -- i.e., the extent to which each -// cell co-varies in its value with each other cell across the rows of the table. -// This is the input to the PCA eigenvalue decomposition of the resulting -// covariance matrix, which extracts the eigenvectors as directions with maximal -// variance in this matrix. -// This Std version is usable e.g., in Python where the func cannot be passed. -func (pa *PCA) TableColumnStd(ix *table.IndexView, column string, met metric.StdMetrics) error { - return pa.TableColumn(ix, column, metric.StdFunc64(met)) -} - -// TensorStd is a convenience method that computes a covariance matrix -// on given tensor and then performs the PCA on the resulting matrix. -// If no error occurs, the results can be read out from Vectors and Values -// or used in Projection methods. -// mfun is Std metric function, typically Covariance or Correlation -- use Covar -// if vars have similar overall scaling, which is typical in neural network models, -// and use Correl if they are on very different scales -- Correl effectively rescales). -// A Covariance matrix computes the *row-wise* vector similarities for each -// pairwise combination of column cells -- i.e., the extent to which each -// cell co-varies in its value with each other cell across the rows of the table. -// This is the input to the PCA eigenvalue decomposition of the resulting -// covariance matrix, which extracts the eigenvectors as directions with maximal -// variance in this matrix. 
-// This Std version is usable e.g., in Python where the func cannot be passed. -func (pa *PCA) TensorStd(tsr tensor.Tensor, met metric.StdMetrics) error { - return pa.Tensor(tsr, metric.StdFunc64(met)) -} - -// PCA performs the eigen decomposition of the existing Covar matrix. -// Vectors and Values fields contain the results. -func (pa *PCA) PCA() error { - if pa.Covar == nil || pa.Covar.NumDims() != 2 { - return fmt.Errorf("pca.PCA: Covar matrix is nil or not 2D") - } - var eig mat.EigenSym - // note: MUST be a Float64 otherwise doesn't have Symmetric function - ok := eig.Factorize(pa.Covar.(*tensor.Float64), true) - if !ok { - return fmt.Errorf("gonum EigenSym Factorize failed") - } - if pa.Vectors == nil { - pa.Vectors = &tensor.Float64{} - } - var ev mat.Dense - eig.VectorsTo(&ev) - tensor.CopyDense(pa.Vectors, &ev) - nr := pa.Vectors.DimSize(0) - if len(pa.Values) != nr { - pa.Values = make([]float64, nr) - } - eig.Values(pa.Values) - return nil -} - -// ProjectColumn projects values from the given column of given table (via IndexView) -// onto the idx'th eigenvector (0 = largest eigenvalue, 1 = next, etc). -// Must have already called PCA() method. -func (pa *PCA) ProjectColumn(vals *[]float64, ix *table.IndexView, column string, idx int) error { - col, err := ix.Table.ColumnByName(column) - if err != nil { - return err - } - if pa.Vectors == nil { - return fmt.Errorf("PCA.ProjectColumn Vectors are nil -- must call PCA first") - } - nr := pa.Vectors.DimSize(0) - if idx >= nr { - return fmt.Errorf("PCA.ProjectColumn eigenvector index > rank of matrix") - } - cvec := make([]float64, nr) - eidx := nr - 1 - idx // eigens in reverse order - vec := pa.Vectors.(*tensor.Float64) - for ri := 0; ri < nr; ri++ { - cvec[ri] = vec.Value([]int{ri, eidx}) // vecs are in columns, reverse magnitude order - } - rows := ix.Len() - if len(*vals) != rows { - *vals = make([]float64, rows) - } - ln := col.Len() - sz := ln / col.DimSize(0) // size of cell - if sz != nr { - return fmt.Errorf("PCA.ProjectColumn column cell size != pca eigenvectors") - } - rdim := []int{0} - for row := 0; row < rows; row++ { - sum := 0.0 - rdim[0] = ix.Indexes[row] - rt := col.SubSpace(rdim) - for ci := 0; ci < sz; ci++ { - sum += cvec[ci] * rt.Float1D(ci) - } - (*vals)[row] = sum - } - return nil -} - -// ProjectColumnToTable projects values from the given column of given table (via IndexView) -// onto the given set of eigenvectors (idxs, 0 = largest eigenvalue, 1 = next, etc), -// and stores results along with labels from column labNm into results table. -// Must have already called PCA() method. 
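ProjectColumn, documented above, reduces to a dot product per row: take column n-1-idx of the eigenvector matrix (reversing the ascending storage order so that idx 0 names the largest eigenvalue, as in the eidx computation in the deleted code) and accumulate it against the row's cell values. A small hypothetical sketch of just that step:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

// projectRow projects one row-vector of cell values onto the idx'th principal
// component (idx 0 = largest eigenvalue). vecs holds eigenvectors as columns in
// ascending eigenvalue order (gonum's EigenSym convention), so component idx is
// column n-1-idx.
func projectRow(vecs mat.Matrix, row []float64, idx int) float64 {
	n, _ := vecs.Dims()
	eidx := n - 1 - idx // eigenvectors stored lowest to highest
	sum := 0.0
	for ci := 0; ci < n; ci++ {
		sum += vecs.At(ci, eidx) * row[ci]
	}
	return sum
}

func main() {
	// Identity "eigenvectors" just to make the example runnable: projecting
	// onto component 0 then picks out the last coordinate of the row.
	vecs := mat.NewDense(2, 2, []float64{1, 0, 0, 1})
	fmt.Println(projectRow(vecs, []float64{3, 5}, 0)) // 5
}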
-func (pa *PCA) ProjectColumnToTable(projections *table.Table, ix *table.IndexView, column, labNm string, idxs []int) error { - _, err := ix.Table.ColumnByName(column) - if err != nil { - return err - } - if pa.Vectors == nil { - return fmt.Errorf("PCA.ProjectColumn Vectors are nil -- must call PCA first") - } - rows := ix.Len() - projections.DeleteAll() - pcolSt := 0 - if labNm != "" { - projections.AddStringColumn(labNm) - pcolSt = 1 - } - for _, idx := range idxs { - projections.AddFloat64Column(fmt.Sprintf("Projection%v", idx)) - } - projections.SetNumRows(rows) - - for ii, idx := range idxs { - pcol := projections.Columns[pcolSt+ii].(*tensor.Float64) - pa.ProjectColumn(&pcol.Values, ix, column, idx) - } - - if labNm != "" { - lcol, err := ix.Table.ColumnByName(labNm) - if err == nil { - plcol := projections.Columns[0] - for row := 0; row < rows; row++ { - plcol.SetString1D(row, lcol.String1D(row)) - } - } - } - return nil -} diff --git a/tensor/stats/pca/pca_test.go b/tensor/stats/pca/pca_test.go deleted file mode 100644 index fb62388c8b..0000000000 --- a/tensor/stats/pca/pca_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pca - -import ( - "fmt" - "math" - "testing" - - "cogentcore.org/core/tensor/stats/metric" - "cogentcore.org/core/tensor/table" -) - -func TestPCAIris(t *testing.T) { - // note: these results are verified against this example: - // https://plot.ly/ipython-notebooks/principal-component-analysis/ - - dt := table.NewTable() - dt.AddFloat64TensorColumn("data", []int{4}) - dt.AddStringColumn("class") - err := dt.OpenCSV("testdata/iris.data", table.Comma) - if err != nil { - t.Error(err) - } - ix := table.NewIndexView(dt) - pc := &PCA{} - // pc.TableColumn(ix, "data", metric.Covariance64) - // fmt.Printf("covar: %v\n", pc.Covar) - err = pc.TableColumn(ix, "data", metric.Correlation64) - if err != nil { - t.Error(err) - } - // fmt.Printf("correl: %v\n", pc.Covar) - // fmt.Printf("correl vec: %v\n", pc.Vectors) - // fmt.Printf("correl val: %v\n", pc.Values) - - errtol := 1.0e-9 - corvals := []float64{0.020607707235624825, 0.14735327830509573, 0.9212209307072254, 2.910818083752054} - for i, v := range pc.Values { - dif := math.Abs(corvals[i] - v) - if dif > errtol { - err = fmt.Errorf("eigenvalue: %v differs from correct: %v was: %v", i, corvals[i], v) - t.Error(err) - } - } - - prjt := &table.Table{} - err = pc.ProjectColumnToTable(prjt, ix, "data", "class", []int{0, 1}) - if err != nil { - t.Error(err) - } - // prjt.SaveCSV("test_data/projection01.csv", table.Comma, true) -} diff --git a/tensor/stats/pca/svd.go b/tensor/stats/pca/svd.go deleted file mode 100644 index d94ab60041..0000000000 --- a/tensor/stats/pca/svd.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pca - -import ( - "fmt" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/stats/metric" - "cogentcore.org/core/tensor/table" - "gonum.org/v1/gonum/mat" -) - -// SVD computes the eigenvalue decomposition of a square similarity matrix, -// typically generated using the correlation metric. -type SVD struct { - - // type of SVD to run: SVDNone is the most efficient if you only need the values which are always computed. 
Otherwise, SVDThin is the next most efficient for getting approximate vectors - Kind mat.SVDKind - - // condition value -- minimum normalized eigenvalue to return in values - Cond float64 `default:"0.01"` - - // the rank (count) of singular values greater than Cond - Rank int - - // the covariance matrix computed on original data, which is then eigen-factored - Covar tensor.Tensor `display:"no-inline"` - - // the eigenvectors, in same size as Covar - each eigenvector is a column in this 2D square matrix, ordered *lowest* to *highest* across the columns -- i.e., maximum eigenvector is the last column - Vectors tensor.Tensor `display:"no-inline"` - - // the eigenvalues, ordered *lowest* to *highest* - Values []float64 `display:"no-inline"` -} - -func (svd *SVD) Init() { - svd.Kind = mat.SVDNone - svd.Cond = 0.01 - svd.Covar = &tensor.Float64{} - svd.Vectors = &tensor.Float64{} - svd.Values = nil -} - -// TableColumn is a convenience method that computes a covariance matrix -// on given column of table and then performs the SVD on the resulting matrix. -// If no error occurs, the results can be read out from Vectors and Values -// or used in Projection methods. -// mfun is metric function, typically Covariance or Correlation -- use Covar -// if vars have similar overall scaling, which is typical in neural network models, -// and use Correl if they are on very different scales -- Correl effectively rescales). -// A Covariance matrix computes the *row-wise* vector similarities for each -// pairwise combination of column cells -- i.e., the extent to which each -// cell co-varies in its value with each other cell across the rows of the table. -// This is the input to the SVD eigenvalue decomposition of the resulting -// covariance matrix, which extracts the eigenvectors as directions with maximal -// variance in this matrix. -func (svd *SVD) TableColumn(ix *table.IndexView, column string, mfun metric.Func64) error { - if svd.Covar == nil { - svd.Init() - } - err := CovarTableColumn(svd.Covar, ix, column, mfun) - if err != nil { - return err - } - return svd.SVD() -} - -// Tensor is a convenience method that computes a covariance matrix -// on given tensor and then performs the SVD on the resulting matrix. -// If no error occurs, the results can be read out from Vectors and Values -// or used in Projection methods. -// mfun is metric function, typically Covariance or Correlation -- use Covar -// if vars have similar overall scaling, which is typical in neural network models, -// and use Correl if they are on very different scales -- Correl effectively rescales). -// A Covariance matrix computes the *row-wise* vector similarities for each -// pairwise combination of column cells -- i.e., the extent to which each -// cell co-varies in its value with each other cell across the rows of the table. -// This is the input to the SVD eigenvalue decomposition of the resulting -// covariance matrix, which extracts the eigenvectors as directions with maximal -// variance in this matrix. -func (svd *SVD) Tensor(tsr tensor.Tensor, mfun metric.Func64) error { - if svd.Covar == nil { - svd.Init() - } - err := CovarTensor(svd.Covar, tsr, mfun) - if err != nil { - return err - } - return svd.SVD() -} - -// TableColumnStd is a convenience method that computes a covariance matrix -// on given column of table and then performs the SVD on the resulting matrix. -// If no error occurs, the results can be read out from Vectors and Values -// or used in Projection methods. 
-// mfun is a Std metric function, typically Covariance or Correlation -- use Covar -// if vars have similar overall scaling, which is typical in neural network models, -// and use Correl if they are on very different scales -- Correl effectively rescales). -// A Covariance matrix computes the *row-wise* vector similarities for each -// pairwise combination of column cells -- i.e., the extent to which each -// cell co-varies in its value with each other cell across the rows of the table. -// This is the input to the SVD eigenvalue decomposition of the resulting -// covariance matrix, which extracts the eigenvectors as directions with maximal -// variance in this matrix. -// This Std version is usable e.g., in Python where the func cannot be passed. -func (svd *SVD) TableColumnStd(ix *table.IndexView, column string, met metric.StdMetrics) error { - return svd.TableColumn(ix, column, metric.StdFunc64(met)) -} - -// TensorStd is a convenience method that computes a covariance matrix -// on given tensor and then performs the SVD on the resulting matrix. -// If no error occurs, the results can be read out from Vectors and Values -// or used in Projection methods. -// mfun is Std metric function, typically Covariance or Correlation -- use Covar -// if vars have similar overall scaling, which is typical in neural network models, -// and use Correl if they are on very different scales -- Correl effectively rescales). -// A Covariance matrix computes the *row-wise* vector similarities for each -// pairwise combination of column cells -- i.e., the extent to which each -// cell co-varies in its value with each other cell across the rows of the table. -// This is the input to the SVD eigenvalue decomposition of the resulting -// covariance matrix, which extracts the eigenvectors as directions with maximal -// variance in this matrix. -// This Std version is usable e.g., in Python where the func cannot be passed. -func (svd *SVD) TensorStd(tsr tensor.Tensor, met metric.StdMetrics) error { - return svd.Tensor(tsr, metric.StdFunc64(met)) -} - -// SVD performs the eigen decomposition of the existing Covar matrix. -// Vectors and Values fields contain the results. -func (svd *SVD) SVD() error { - if svd.Covar == nil || svd.Covar.NumDims() != 2 { - return fmt.Errorf("svd.SVD: Covar matrix is nil or not 2D") - } - var eig mat.SVD - // note: MUST be a Float64 otherwise doesn't have Symmetric function - ok := eig.Factorize(svd.Covar, svd.Kind) - if !ok { - return fmt.Errorf("gonum SVD Factorize failed") - } - if svd.Kind > mat.SVDNone { - if svd.Vectors == nil { - svd.Vectors = &tensor.Float64{} - } - var ev mat.Dense - eig.UTo(&ev) - tensor.CopyDense(svd.Vectors, &ev) - } - nr := svd.Covar.DimSize(0) - if len(svd.Values) != nr { - svd.Values = make([]float64, nr) - } - eig.Values(svd.Values) - svd.Rank = eig.Rank(svd.Cond) - return nil -} - -// ProjectColumn projects values from the given column of given table (via IndexView) -// onto the idx'th eigenvector (0 = largest eigenvalue, 1 = next, etc). -// Must have already called SVD() method. 
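The SVD variant factors the same covariance matrix with gonum's mat.SVD rather than mat.EigenSym; singular values come back largest first, and Rank reports how many clear the condition threshold, as in the deleted SVD() method above. A standalone sketch of that call pattern with illustrative values:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	covar := mat.NewDense(2, 2, []float64{
		2.0, 0.8,
		0.8, 1.0,
	})

	var svd mat.SVD
	if ok := svd.Factorize(covar, mat.SVDFull); !ok { // SVDFull also computes the vectors
		panic("SVD factorization failed")
	}

	vals := svd.Values(nil) // singular values, largest first
	var u mat.Dense
	svd.UTo(&u) // left singular vectors, one per column

	fmt.Println("singular values:", vals)
	fmt.Println("effective rank (cond=0.01):", svd.Rank(0.01)) // values above the condition threshold
	fmt.Printf("U:\n%v\n", mat.Formatted(&u))
}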
-func (svd *SVD) ProjectColumn(vals *[]float64, ix *table.IndexView, column string, idx int) error { - col, err := ix.Table.ColumnByName(column) - if err != nil { - return err - } - if svd.Vectors == nil || svd.Vectors.Len() == 0 { - return fmt.Errorf("SVD.ProjectColumn Vectors are nil: must call SVD first, with Kind = mat.SVDFull so that the vectors are returned") - } - nr := svd.Vectors.DimSize(0) - if idx >= nr { - return fmt.Errorf("SVD.ProjectColumn eigenvector index > rank of matrix") - } - cvec := make([]float64, nr) - // eidx := nr - 1 - idx // eigens in reverse order - vec := svd.Vectors.(*tensor.Float64) - for ri := 0; ri < nr; ri++ { - cvec[ri] = vec.Value([]int{ri, idx}) // vecs are in columns, reverse magnitude order - } - rows := ix.Len() - if len(*vals) != rows { - *vals = make([]float64, rows) - } - ln := col.Len() - sz := ln / col.DimSize(0) // size of cell - if sz != nr { - return fmt.Errorf("SVD.ProjectColumn column cell size != svd eigenvectors") - } - rdim := []int{0} - for row := 0; row < rows; row++ { - sum := 0.0 - rdim[0] = ix.Indexes[row] - rt := col.SubSpace(rdim) - for ci := 0; ci < sz; ci++ { - sum += cvec[ci] * rt.Float1D(ci) - } - (*vals)[row] = sum - } - return nil -} - -// ProjectColumnToTable projects values from the given column of given table (via IndexView) -// onto the given set of eigenvectors (idxs, 0 = largest eigenvalue, 1 = next, etc), -// and stores results along with labels from column labNm into results table. -// Must have already called SVD() method. -func (svd *SVD) ProjectColumnToTable(projections *table.Table, ix *table.IndexView, column, labNm string, idxs []int) error { - _, err := ix.Table.ColumnByName(column) - if errors.Log(err) != nil { - return err - } - if svd.Vectors == nil { - return fmt.Errorf("SVD.ProjectColumn Vectors are nil -- must call SVD first") - } - rows := ix.Len() - projections.DeleteAll() - pcolSt := 0 - if labNm != "" { - projections.AddStringColumn(labNm) - pcolSt = 1 - } - for _, idx := range idxs { - projections.AddFloat64Column(fmt.Sprintf("Projection%v", idx)) - } - projections.SetNumRows(rows) - - for ii, idx := range idxs { - pcol := projections.Columns[pcolSt+ii].(*tensor.Float64) - svd.ProjectColumn(&pcol.Values, ix, column, idx) - } - - if labNm != "" { - lcol, err := ix.Table.ColumnByName(labNm) - if errors.Log(err) == nil { - plcol := projections.Columns[0] - for row := 0; row < rows; row++ { - plcol.SetString1D(row, lcol.String1D(row)) - } - } - } - return nil -} diff --git a/tensor/stats/pca/svd_test.go b/tensor/stats/pca/svd_test.go deleted file mode 100644 index f1caad8b7c..0000000000 --- a/tensor/stats/pca/svd_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pca - -import ( - "fmt" - "math" - "testing" - - "cogentcore.org/core/tensor/stats/metric" - "cogentcore.org/core/tensor/table" - "gonum.org/v1/gonum/mat" -) - -func TestSVDIris(t *testing.T) { - // note: these results are verified against this example: - // https://plot.ly/ipython-notebooks/principal-component-analysis/ - - dt := table.NewTable() - dt.AddFloat64TensorColumn("data", []int{4}) - dt.AddStringColumn("class") - err := dt.OpenCSV("testdata/iris.data", table.Comma) - if err != nil { - t.Error(err) - } - ix := table.NewIndexView(dt) - pc := &SVD{} - pc.Init() - pc.Kind = mat.SVDFull - // pc.TableColumn(ix, "data", metric.Covariance64) - // fmt.Printf("covar: %v\n", pc.Covar) - err = pc.TableColumn(ix, "data", metric.Correlation64) - if err != nil { - t.Error(err) - } - // fmt.Printf("correl: %v\n", pc.Covar) - // fmt.Printf("correl vec: %v\n", pc.Vectors) - // fmt.Printf("correl val: %v\n", pc.Values) - - errtol := 1.0e-9 - corvals := []float64{2.910818083752054, 0.9212209307072254, 0.14735327830509573, 0.020607707235624825} - for i, v := range pc.Values { - dif := math.Abs(corvals[i] - v) - if dif > errtol { - err = fmt.Errorf("eigenvalue: %v differs from correct: %v was: %v", i, corvals[i], v) - t.Error(err) - } - } - - prjt := &table.Table{} - err = pc.ProjectColumnToTable(prjt, ix, "data", "class", []int{0, 1}) - if err != nil { - t.Error(err) - } - // prjt.SaveCSV("test_data/svd_projection01.csv", table.Comma, true) -} diff --git a/tensor/stats/pca/testdata/iris.data b/tensor/stats/pca/testdata/iris.data deleted file mode 100644 index a3490e0e07..0000000000 --- a/tensor/stats/pca/testdata/iris.data +++ /dev/null @@ -1,150 +0,0 @@ -5.1,3.5,1.4,0.2,Iris-setosa -4.9,3.0,1.4,0.2,Iris-setosa -4.7,3.2,1.3,0.2,Iris-setosa -4.6,3.1,1.5,0.2,Iris-setosa -5.0,3.6,1.4,0.2,Iris-setosa -5.4,3.9,1.7,0.4,Iris-setosa -4.6,3.4,1.4,0.3,Iris-setosa -5.0,3.4,1.5,0.2,Iris-setosa -4.4,2.9,1.4,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -5.4,3.7,1.5,0.2,Iris-setosa -4.8,3.4,1.6,0.2,Iris-setosa -4.8,3.0,1.4,0.1,Iris-setosa -4.3,3.0,1.1,0.1,Iris-setosa -5.8,4.0,1.2,0.2,Iris-setosa -5.7,4.4,1.5,0.4,Iris-setosa -5.4,3.9,1.3,0.4,Iris-setosa -5.1,3.5,1.4,0.3,Iris-setosa -5.7,3.8,1.7,0.3,Iris-setosa -5.1,3.8,1.5,0.3,Iris-setosa -5.4,3.4,1.7,0.2,Iris-setosa -5.1,3.7,1.5,0.4,Iris-setosa -4.6,3.6,1.0,0.2,Iris-setosa -5.1,3.3,1.7,0.5,Iris-setosa -4.8,3.4,1.9,0.2,Iris-setosa -5.0,3.0,1.6,0.2,Iris-setosa -5.0,3.4,1.6,0.4,Iris-setosa -5.2,3.5,1.5,0.2,Iris-setosa -5.2,3.4,1.4,0.2,Iris-setosa -4.7,3.2,1.6,0.2,Iris-setosa -4.8,3.1,1.6,0.2,Iris-setosa -5.4,3.4,1.5,0.4,Iris-setosa -5.2,4.1,1.5,0.1,Iris-setosa -5.5,4.2,1.4,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -5.0,3.2,1.2,0.2,Iris-setosa -5.5,3.5,1.3,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -4.4,3.0,1.3,0.2,Iris-setosa -5.1,3.4,1.5,0.2,Iris-setosa -5.0,3.5,1.3,0.3,Iris-setosa -4.5,2.3,1.3,0.3,Iris-setosa -4.4,3.2,1.3,0.2,Iris-setosa -5.0,3.5,1.6,0.6,Iris-setosa -5.1,3.8,1.9,0.4,Iris-setosa -4.8,3.0,1.4,0.3,Iris-setosa -5.1,3.8,1.6,0.2,Iris-setosa -4.6,3.2,1.4,0.2,Iris-setosa -5.3,3.7,1.5,0.2,Iris-setosa -5.0,3.3,1.4,0.2,Iris-setosa -7.0,3.2,4.7,1.4,Iris-versicolor -6.4,3.2,4.5,1.5,Iris-versicolor -6.9,3.1,4.9,1.5,Iris-versicolor -5.5,2.3,4.0,1.3,Iris-versicolor -6.5,2.8,4.6,1.5,Iris-versicolor -5.7,2.8,4.5,1.3,Iris-versicolor -6.3,3.3,4.7,1.6,Iris-versicolor -4.9,2.4,3.3,1.0,Iris-versicolor -6.6,2.9,4.6,1.3,Iris-versicolor -5.2,2.7,3.9,1.4,Iris-versicolor -5.0,2.0,3.5,1.0,Iris-versicolor -5.9,3.0,4.2,1.5,Iris-versicolor 
-6.0,2.2,4.0,1.0,Iris-versicolor -6.1,2.9,4.7,1.4,Iris-versicolor -5.6,2.9,3.6,1.3,Iris-versicolor -6.7,3.1,4.4,1.4,Iris-versicolor -5.6,3.0,4.5,1.5,Iris-versicolor -5.8,2.7,4.1,1.0,Iris-versicolor -6.2,2.2,4.5,1.5,Iris-versicolor -5.6,2.5,3.9,1.1,Iris-versicolor -5.9,3.2,4.8,1.8,Iris-versicolor -6.1,2.8,4.0,1.3,Iris-versicolor -6.3,2.5,4.9,1.5,Iris-versicolor -6.1,2.8,4.7,1.2,Iris-versicolor -6.4,2.9,4.3,1.3,Iris-versicolor -6.6,3.0,4.4,1.4,Iris-versicolor -6.8,2.8,4.8,1.4,Iris-versicolor -6.7,3.0,5.0,1.7,Iris-versicolor -6.0,2.9,4.5,1.5,Iris-versicolor -5.7,2.6,3.5,1.0,Iris-versicolor -5.5,2.4,3.8,1.1,Iris-versicolor -5.5,2.4,3.7,1.0,Iris-versicolor -5.8,2.7,3.9,1.2,Iris-versicolor -6.0,2.7,5.1,1.6,Iris-versicolor -5.4,3.0,4.5,1.5,Iris-versicolor -6.0,3.4,4.5,1.6,Iris-versicolor -6.7,3.1,4.7,1.5,Iris-versicolor -6.3,2.3,4.4,1.3,Iris-versicolor -5.6,3.0,4.1,1.3,Iris-versicolor -5.5,2.5,4.0,1.3,Iris-versicolor -5.5,2.6,4.4,1.2,Iris-versicolor -6.1,3.0,4.6,1.4,Iris-versicolor -5.8,2.6,4.0,1.2,Iris-versicolor -5.0,2.3,3.3,1.0,Iris-versicolor -5.6,2.7,4.2,1.3,Iris-versicolor -5.7,3.0,4.2,1.2,Iris-versicolor -5.7,2.9,4.2,1.3,Iris-versicolor -6.2,2.9,4.3,1.3,Iris-versicolor -5.1,2.5,3.0,1.1,Iris-versicolor -5.7,2.8,4.1,1.3,Iris-versicolor -6.3,3.3,6.0,2.5,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -7.1,3.0,5.9,2.1,Iris-virginica -6.3,2.9,5.6,1.8,Iris-virginica -6.5,3.0,5.8,2.2,Iris-virginica -7.6,3.0,6.6,2.1,Iris-virginica -4.9,2.5,4.5,1.7,Iris-virginica -7.3,2.9,6.3,1.8,Iris-virginica -6.7,2.5,5.8,1.8,Iris-virginica -7.2,3.6,6.1,2.5,Iris-virginica -6.5,3.2,5.1,2.0,Iris-virginica -6.4,2.7,5.3,1.9,Iris-virginica -6.8,3.0,5.5,2.1,Iris-virginica -5.7,2.5,5.0,2.0,Iris-virginica -5.8,2.8,5.1,2.4,Iris-virginica -6.4,3.2,5.3,2.3,Iris-virginica -6.5,3.0,5.5,1.8,Iris-virginica -7.7,3.8,6.7,2.2,Iris-virginica -7.7,2.6,6.9,2.3,Iris-virginica -6.0,2.2,5.0,1.5,Iris-virginica -6.9,3.2,5.7,2.3,Iris-virginica -5.6,2.8,4.9,2.0,Iris-virginica -7.7,2.8,6.7,2.0,Iris-virginica -6.3,2.7,4.9,1.8,Iris-virginica -6.7,3.3,5.7,2.1,Iris-virginica -7.2,3.2,6.0,1.8,Iris-virginica -6.2,2.8,4.8,1.8,Iris-virginica -6.1,3.0,4.9,1.8,Iris-virginica -6.4,2.8,5.6,2.1,Iris-virginica -7.2,3.0,5.8,1.6,Iris-virginica -7.4,2.8,6.1,1.9,Iris-virginica -7.9,3.8,6.4,2.0,Iris-virginica -6.4,2.8,5.6,2.2,Iris-virginica -6.3,2.8,5.1,1.5,Iris-virginica -6.1,2.6,5.6,1.4,Iris-virginica -7.7,3.0,6.1,2.3,Iris-virginica -6.3,3.4,5.6,2.4,Iris-virginica -6.4,3.1,5.5,1.8,Iris-virginica -6.0,3.0,4.8,1.8,Iris-virginica -6.9,3.1,5.4,2.1,Iris-virginica -6.7,3.1,5.6,2.4,Iris-virginica -6.9,3.1,5.1,2.3,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -6.8,3.2,5.9,2.3,Iris-virginica -6.7,3.3,5.7,2.5,Iris-virginica -6.7,3.0,5.2,2.3,Iris-virginica -6.3,2.5,5.0,1.9,Iris-virginica -6.5,3.0,5.2,2.0,Iris-virginica -6.2,3.4,5.4,2.3,Iris-virginica -5.9,3.0,5.1,1.8,Iris-virginica diff --git a/tensor/stats/pca/testdata/projection01.csv b/tensor/stats/pca/testdata/projection01.csv deleted file mode 100644 index 49ef2cc2de..0000000000 --- a/tensor/stats/pca/testdata/projection01.csv +++ /dev/null @@ -1,151 +0,0 @@ -_H:,$class,#Prjn0,#Prjn1 -_D:,Iris-setosa,2.669230878293515,5.180887223993903 -_D:,Iris-setosa,2.6964340118689534,4.643645304250262 -_D:,Iris-setosa,2.4811633041648684,4.752183452725602 -_D:,Iris-setosa,2.5715124347750256,4.6266149223441255 -_D:,Iris-setosa,2.5906582247213548,5.236211037073636 -_D:,Iris-setosa,3.0080988099460617,5.682216917525971 -_D:,Iris-setosa,2.490941664609344,4.90871396981208 -_D:,Iris-setosa,2.7014546083439073,5.053209215928301 
-_D:,Iris-setosa,2.461583693196517,4.364930473160547 -_D:,Iris-setosa,2.6716628159090594,4.731768854441222 -_D:,Iris-setosa,2.83139678191279,5.479803509512478 -_D:,Iris-setosa,2.6551056848221406,4.980855020942431 -_D:,Iris-setosa,2.5876357448399223,4.599871891007371 -_D:,Iris-setosa,2.152073732956798,4.4073842762800135 -_D:,Iris-setosa,2.786962753802378,5.900069370044279 -_D:,Iris-setosa,2.91688203729186,6.252471718236359 -_D:,Iris-setosa,2.7755972077070026,5.673779006789473 -_D:,Iris-setosa,2.72579198328178,5.187428800901795 -_D:,Iris-setosa,3.134584682611489,5.694815200208339 -_D:,Iris-setosa,2.7049109092473627,5.4672052268301075 -_D:,Iris-setosa,3.0266540576265015,5.206355516636538 -_D:,Iris-setosa,2.787807505767021,5.381191154323272 -_D:,Iris-setosa,2.1492079743192316,5.078845780997149 -_D:,Iris-setosa,3.065961378000392,5.0217290889404955 -_D:,Iris-setosa,2.829481886501435,4.987183453994805 -_D:,Iris-setosa,2.8649219750292487,4.685096095953507 -_D:,Iris-setosa,2.8727022188802023,5.068401847428211 -_D:,Iris-setosa,2.7795934408940464,5.220228538013025 -_D:,Iris-setosa,2.7478035318656753,5.12556341091417 -_D:,Iris-setosa,2.6555395058441627,4.758511885777976 -_D:,Iris-setosa,2.734112159416322,4.703188072698243 -_D:,Iris-setosa,3.023525466483502,5.215219715084075 -_D:,Iris-setosa,2.565019386717418,5.769020857593508 -_D:,Iris-setosa,2.6938310857368215,5.977704115236996 -_D:,Iris-setosa,2.6716628159090594,4.731768854441222 -_D:,Iris-setosa,2.579749389727401,4.8617694840464685 -_D:,Iris-setosa,2.8200541258968146,5.327705091649766 -_D:,Iris-setosa,2.6716628159090594,4.731768854441222 -_D:,Iris-setosa,2.3771228011053585,4.455376644891153 -_D:,Iris-setosa,2.7536917703846737,5.090441052263297 -_D:,Iris-setosa,2.615429420681249,5.148087486882674 -_D:,Iris-setosa,2.670269508854147,3.8512605122309354 -_D:,Iris-setosa,2.3244518180425704,4.640487943720612 -_D:,Iris-setosa,2.959488937325338,5.174040650658726 -_D:,Iris-setosa,2.993973616474687,5.482184714474499 -_D:,Iris-setosa,2.700757954816452,4.612955044823157 -_D:,Iris-setosa,2.706475204818863,5.462773127606339 -_D:,Iris-setosa,2.487051542683867,4.7170610940747295 -_D:,Iris-setosa,2.7791596198720234,5.44257167317748 -_D:,Iris-setosa,2.669664699315537,4.958544088829447 -_D:,Iris-versicolor,6.337614909993668,5.758736852585482 -_D:,Iris-versicolor,5.964502241617807,5.537668456115144 -_D:,Iris-versicolor,6.48452514559209,5.639709899111897 -_D:,Iris-versicolor,5.327637994258105,4.345950542131197 -_D:,Iris-versicolor,6.180206770343914,5.206787172475347 -_D:,Iris-versicolor,5.5910618634814915,4.893739850295462 -_D:,Iris-versicolor,6.058741494153441,5.603752801471018 -_D:,Iris-versicolor,4.411318411598967,4.180724099023395 -_D:,Iris-versicolor,6.092986230876757,5.323491504409287 -_D:,Iris-versicolor,5.064020246438732,4.608909730008894 -_D:,Iris-versicolor,4.685148340884838,3.8519522930677232 -_D:,Iris-versicolor,5.581611212797471,5.160069542558326 -_D:,Iris-versicolor,5.445475981028535,4.419929343667774 -_D:,Iris-versicolor,5.946486926220956,5.1459833773263215 -_D:,Iris-versicolor,4.989360604871448,4.930078364218072 -_D:,Iris-versicolor,6.03286271372347,5.548157261113388 -_D:,Iris-versicolor,5.599275928354467,5.054702466605709 -_D:,Iris-versicolor,5.267449599849797,4.810353395755553 -_D:,Iris-versicolor,6.123382832850215,4.537648289297856 -_D:,Iris-versicolor,5.155956562699788,4.553101045795742 -_D:,Iris-versicolor,6.0473759480580656,5.377462438216211 -_D:,Iris-versicolor,5.509383508845732,5.032119807214825 
-_D:,Iris-versicolor,6.329115122535858,4.8609849846135385 -_D:,Iris-versicolor,5.859700207775821,5.040344574095806 -_D:,Iris-versicolor,5.81413570511593,5.242699398686921 -_D:,Iris-versicolor,6.006961043214098,5.4183697753636615 -_D:,Iris-versicolor,6.396607952597476,5.316160059940694 -_D:,Iris-versicolor,6.577633923578247,5.487883208527084 -_D:,Iris-versicolor,5.834560068048925,5.111074162530968 -_D:,Iris-versicolor,4.892795525981836,4.667909043901078 -_D:,Iris-versicolor,5.071929491630652,4.421204082361892 -_D:,Iris-versicolor,4.957242986082623,4.412553027769875 -_D:,Iris-versicolor,5.264321008706797,4.8192175942030895 -_D:,Iris-versicolor,6.292544559458566,4.94516130671415 -_D:,Iris-versicolor,5.4948016042729355,4.980238793935716 -_D:,Iris-versicolor,5.75944371538022,5.580393986512508 -_D:,Iris-versicolor,6.263800020391029,5.561027271073654 -_D:,Iris-versicolor,5.978036892823293,4.652243143547671 -_D:,Iris-versicolor,5.253652116138878,5.033181402053425 -_D:,Iris-versicolor,5.274967011195318,4.531061840960656 -_D:,Iris-versicolor,5.424572016914717,4.625513824203992 -_D:,Iris-versicolor,5.862026034129797,5.236429549056926 -_D:,Iris-versicolor,5.348781900797956,4.728771422472485 -_D:,Iris-versicolor,4.489891065171127,4.1254002859436625 -_D:,Iris-versicolor,5.3907839912928255,4.757623931493361 -_D:,Iris-versicolor,5.307453573751144,5.065981139164654 -_D:,Iris-versicolor,5.390350170270803,4.979967066657817 -_D:,Iris-versicolor,5.709661381034398,5.168235726016927 -_D:,Iris-versicolor,4.3716421474580756,4.347956564963636 -_D:,Iris-versicolor,5.358560261242432,4.885301939558963 -_D:,Iris-virginica,7.323421646324768,5.690050203535673 -_D:,Iris-virginica,6.357753550341829,4.890322364767835 -_D:,Iris-virginica,7.535955596732253,5.6819621606557655 -_D:,Iris-virginica,6.80033427529343,5.265598656785007 -_D:,Iris-virginica,7.2209683289161575,5.463003241869551 -_D:,Iris-virginica,8.204019210854437,5.882887686119622 -_D:,Iris-virginica,5.478415461702605,4.34438451900287 -_D:,Iris-virginica,7.729583699619444,5.652683363923848 -_D:,Iris-virginica,7.230875690701601,5.048522359834326 -_D:,Iris-virginica,7.772675030657245,6.30491315647896 -_D:,Iris-virginica,6.648297331958487,5.620265043094353 -_D:,Iris-virginica,6.7874273237059555,5.1179323381460655 -_D:,Iris-virginica,7.146742508370896,5.561828740914276 -_D:,Iris-virginica,6.356623075792351,4.672411328827146 -_D:,Iris-virginica,6.614223583751759,5.015585898722028 -_D:,Iris-virginica,6.881994286002045,5.606876892851283 -_D:,Iris-virginica,6.820347707283804,5.430508501185606 -_D:,Iris-virginica,8.160258946192082,6.669215772364471 -_D:,Iris-virginica,8.649096750676604,5.56930851166386 -_D:,Iris-virginica,6.309535511567507,4.473732005048484 -_D:,Iris-virginica,7.375681698444933,5.801473985262766 -_D:,Iris-virginica,6.167254038597639,4.910736963052213 -_D:,Iris-virginica,8.310491651529492,5.7305761244013915 -_D:,Iris-virginica,6.446127454437865,5.065721014166677 -_D:,Iris-virginica,7.131749672855478,5.806482808191717 -_D:,Iris-virginica,7.423963861305202,5.8867900427806665 -_D:,Iris-virginica,6.30942940030594,5.1189353495622845 -_D:,Iris-virginica,6.262646655762151,5.2689242897408715 -_D:,Iris-virginica,7.048590243830385,5.229899574428954 -_D:,Iris-virginica,7.247261833271931,5.6843766347671725 -_D:,Iris-virginica,7.748466657060339,5.59968217238376 -_D:,Iris-virginica,7.9772348586177895,6.724267858166305 -_D:,Iris-virginica,7.10515134881865,5.236441151336846 -_D:,Iris-virginica,6.366359449061206,5.142870888225977 
-_D:,Iris-virginica,6.548622005853021,4.887301728239255 -_D:,Iris-virginica,8.07875158007291,5.92265528784978 -_D:,Iris-virginica,7.00802344756605,5.767626365306011 -_D:,Iris-virginica,6.7417750537116445,5.4858323142653385 -_D:,Iris-virginica,6.15228409316162,5.2295829757217485 -_D:,Iris-virginica,7.114518778320504,5.689506748979877 -_D:,Iris-virginica,7.295978570323296,5.638886762401811 -_D:,Iris-virginica,7.0532647866177385,5.6962614697432885 -_D:,Iris-virginica,6.357753550341829,4.890322364767835 -_D:,Iris-virginica,7.439695337523697,5.768461104296018 -_D:,Iris-virginica,7.3579940928085374,5.832649115823288 -_D:,Iris-virginica,7.0332513546273665,5.5313516253426895 -_D:,Iris-virginica,6.613484943048682,4.889260769929234 -_D:,Iris-virginica,6.75909371558104,5.437263221949017 -_D:,Iris-virginica,6.782974379417489,5.719633996694872 -_D:,Iris-virginica,6.274423132800148,5.198679572439127 diff --git a/tensor/stats/pca/testdata/svd_projection01.csv b/tensor/stats/pca/testdata/svd_projection01.csv deleted file mode 100644 index bef23d7481..0000000000 --- a/tensor/stats/pca/testdata/svd_projection01.csv +++ /dev/null @@ -1,151 +0,0 @@ -$class,#Prjn0,#Prjn1 -Iris-setosa,-2.6692308782935164,-5.180887223993902 -Iris-setosa,-2.6964340118689543,-4.643645304250262 -Iris-setosa,-2.4811633041648697,-4.752183452725602 -Iris-setosa,-2.5715124347750264,-4.626614922344125 -Iris-setosa,-2.5906582247213565,-5.236211037073635 -Iris-setosa,-3.0080988099460635,-5.682216917525971 -Iris-setosa,-2.4909416646093447,-4.908713969812081 -Iris-setosa,-2.701454608343909,-5.053209215928301 -Iris-setosa,-2.461583693196518,-4.364930473160547 -Iris-setosa,-2.6716628159090603,-4.731768854441222 -Iris-setosa,-2.8313967819127916,-5.479803509512477 -Iris-setosa,-2.655105684822142,-4.980855020942431 -Iris-setosa,-2.5876357448399236,-4.599871891007371 -Iris-setosa,-2.152073732956799,-4.4073842762800135 -Iris-setosa,-2.7869627538023796,-5.900069370044279 -Iris-setosa,-2.9168820372918622,-6.25247171823636 -Iris-setosa,-2.7755972077070044,-5.673779006789473 -Iris-setosa,-2.7257919832817814,-5.187428800901794 -Iris-setosa,-3.1345846826114907,-5.694815200208339 -Iris-setosa,-2.7049109092473644,-5.467205226830108 -Iris-setosa,-3.0266540576265033,-5.206355516636537 -Iris-setosa,-2.7878075057670233,-5.381191154323272 -Iris-setosa,-2.149207974319233,-5.078845780997149 -Iris-setosa,-3.0659613780003934,-5.0217290889404955 -Iris-setosa,-2.829481886501436,-4.987183453994805 -Iris-setosa,-2.86492197502925,-4.685096095953507 -Iris-setosa,-2.872702218880204,-5.068401847428211 -Iris-setosa,-2.7795934408940473,-5.220228538013024 -Iris-setosa,-2.747803531865676,-5.12556341091417 -Iris-setosa,-2.655539505844164,-4.758511885777976 -Iris-setosa,-2.734112159416324,-4.703188072698243 -Iris-setosa,-3.0235254664835036,-5.215219715084074 -Iris-setosa,-2.565019386717419,-5.769020857593508 -Iris-setosa,-2.6938310857368224,-5.977704115236996 -Iris-setosa,-2.6716628159090603,-4.731768854441222 -Iris-setosa,-2.579749389727403,-4.8617694840464685 -Iris-setosa,-2.820054125896816,-5.327705091649766 -Iris-setosa,-2.6716628159090603,-4.731768854441222 -Iris-setosa,-2.3771228011053593,-4.455376644891153 -Iris-setosa,-2.753691770384675,-5.090441052263298 -Iris-setosa,-2.615429420681251,-5.148087486882674 -Iris-setosa,-2.6702695088541484,-3.851260512230936 -Iris-setosa,-2.3244518180425717,-4.640487943720612 -Iris-setosa,-2.95948893732534,-5.174040650658726 -Iris-setosa,-2.9939736164746886,-5.4821847144745 -Iris-setosa,-2.7007579548164533,-4.612955044823157 
-Iris-setosa,-2.7064752048188643,-5.46277312760634 -Iris-setosa,-2.4870515426838677,-4.7170610940747295 -Iris-setosa,-2.779159619872025,-5.44257167317748 -Iris-setosa,-2.6696646993155384,-4.958544088829447 -Iris-versicolor,-6.33761490999367,-5.758736852585481 -Iris-versicolor,-5.96450224161781,-5.537668456115145 -Iris-versicolor,-6.484525145592094,-5.639709899111898 -Iris-versicolor,-5.327637994258107,-4.345950542131198 -Iris-versicolor,-6.180206770343917,-5.206787172475347 -Iris-versicolor,-5.591061863481493,-4.8937398502954625 -Iris-versicolor,-6.058741494153444,-5.6037528014710185 -Iris-versicolor,-4.411318411598969,-4.180724099023395 -Iris-versicolor,-6.092986230876758,-5.323491504409287 -Iris-versicolor,-5.064020246438733,-4.608909730008894 -Iris-versicolor,-4.68514834088484,-3.8519522930677237 -Iris-versicolor,-5.581611212797473,-5.160069542558327 -Iris-versicolor,-5.445475981028536,-4.419929343667774 -Iris-versicolor,-5.946486926220958,-5.1459833773263215 -Iris-versicolor,-4.98936060487145,-4.930078364218073 -Iris-versicolor,-6.032862713723472,-5.548157261113388 -Iris-versicolor,-5.599275928354468,-5.05470246660571 -Iris-versicolor,-5.267449599849799,-4.810353395755553 -Iris-versicolor,-6.123382832850217,-4.537648289297856 -Iris-versicolor,-5.15595656269979,-4.553101045795744 -Iris-versicolor,-6.047375948058068,-5.377462438216212 -Iris-versicolor,-5.509383508845733,-5.032119807214826 -Iris-versicolor,-6.32911512253586,-4.860984984613539 -Iris-versicolor,-5.8597002077758225,-5.040344574095807 -Iris-versicolor,-5.814135705115932,-5.242699398686921 -Iris-versicolor,-6.006961043214099,-5.418369775363661 -Iris-versicolor,-6.396607952597479,-5.316160059940693 -Iris-versicolor,-6.577633923578249,-5.4878832085270846 -Iris-versicolor,-5.834560068048927,-5.111074162530969 -Iris-versicolor,-4.892795525981839,-4.667909043901078 -Iris-versicolor,-5.0719294916306525,-4.421204082361892 -Iris-versicolor,-4.9572429860826235,-4.412553027769875 -Iris-versicolor,-5.2643210087067995,-4.8192175942030895 -Iris-versicolor,-6.292544559458569,-4.94516130671415 -Iris-versicolor,-5.494801604272937,-4.980238793935717 -Iris-versicolor,-5.759443715380222,-5.580393986512508 -Iris-versicolor,-6.263800020391032,-5.561027271073655 -Iris-versicolor,-5.978036892823295,-4.652243143547671 -Iris-versicolor,-5.25365211613888,-5.033181402053426 -Iris-versicolor,-5.274967011195319,-4.531061840960657 -Iris-versicolor,-5.42457201691472,-4.625513824203992 -Iris-versicolor,-5.862026034129799,-5.2364295490569255 -Iris-versicolor,-5.348781900797959,-4.728771422472485 -Iris-versicolor,-4.489891065171128,-4.1254002859436625 -Iris-versicolor,-5.390783991292826,-4.757623931493362 -Iris-versicolor,-5.307453573751146,-5.065981139164655 -Iris-versicolor,-5.390350170270805,-4.979967066657818 -Iris-versicolor,-5.7096613810344,-5.168235726016928 -Iris-versicolor,-4.371642147458076,-4.347956564963637 -Iris-versicolor,-5.358560261242434,-4.885301939558964 -Iris-virginica,-7.32342164632477,-5.690050203535675 -Iris-virginica,-6.357753550341831,-4.890322364767834 -Iris-virginica,-7.535955596732257,-5.681962160655765 -Iris-virginica,-6.800334275293433,-5.265598656785008 -Iris-virginica,-7.22096832891616,-5.463003241869552 -Iris-virginica,-8.204019210854439,-5.882887686119622 -Iris-virginica,-5.478415461702606,-4.344384519002871 -Iris-virginica,-7.729583699619447,-5.652683363923849 -Iris-virginica,-7.230875690701603,-5.048522359834328 -Iris-virginica,-7.772675030657247,-6.30491315647896 -Iris-virginica,-6.648297331958489,-5.620265043094353 
-Iris-virginica,-6.787427323705957,-5.117932338146065 -Iris-virginica,-7.146742508370899,-5.561828740914276 -Iris-virginica,-6.356623075792353,-4.672411328827147 -Iris-virginica,-6.614223583751762,-5.015585898722027 -Iris-virginica,-6.881994286002047,-5.606876892851284 -Iris-virginica,-6.820347707283807,-5.4305085011856065 -Iris-virginica,-8.160258946192084,-6.669215772364471 -Iris-virginica,-8.649096750676605,-5.56930851166386 -Iris-virginica,-6.309535511567509,-4.473732005048484 -Iris-virginica,-7.375681698444936,-5.801473985262767 -Iris-virginica,-6.16725403859764,-4.910736963052214 -Iris-virginica,-8.310491651529494,-5.730576124401391 -Iris-virginica,-6.4461274544378675,-5.065721014166677 -Iris-virginica,-7.131749672855481,-5.806482808191716 -Iris-virginica,-7.423963861305205,-5.886790042780667 -Iris-virginica,-6.309429400305942,-5.1189353495622845 -Iris-virginica,-6.262646655762153,-5.2689242897408715 -Iris-virginica,-7.048590243830388,-5.229899574428954 -Iris-virginica,-7.247261833271932,-5.684376634767173 -Iris-virginica,-7.748466657060342,-5.599682172383759 -Iris-virginica,-7.977234858617792,-6.724267858166306 -Iris-virginica,-7.105151348818652,-5.236441151336847 -Iris-virginica,-6.366359449061208,-5.142870888225977 -Iris-virginica,-6.548622005853022,-4.887301728239255 -Iris-virginica,-8.078751580072911,-5.922655287849781 -Iris-virginica,-7.0080234475660514,-5.767626365306012 -Iris-virginica,-6.741775053711646,-5.485832314265339 -Iris-virginica,-6.152284093161622,-5.229582975721749 -Iris-virginica,-7.114518778320507,-5.689506748979878 -Iris-virginica,-7.295978570323298,-5.638886762401812 -Iris-virginica,-7.053264786617741,-5.696261469743289 -Iris-virginica,-6.357753550341831,-4.890322364767834 -Iris-virginica,-7.4396953375237,-5.768461104296019 -Iris-virginica,-7.35799409280854,-5.832649115823288 -Iris-virginica,-7.033251354627368,-5.53135162534269 -Iris-virginica,-6.613484943048684,-4.889260769929234 -Iris-virginica,-6.7590937155810416,-5.437263221949018 -Iris-virginica,-6.7829743794174915,-5.719633996694874 -Iris-virginica,-6.274423132800151,-5.1986795724391275 diff --git a/tensor/stats/simat/README.md b/tensor/stats/simat/README.md deleted file mode 100644 index 6d2f2ee7f7..0000000000 --- a/tensor/stats/simat/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# simat - -`simat` provides similarity / distance matrix functions that create a `SimMat` matrix from Tensor or Table data. Any metric function defined in metric package (or user-created) can be used. - -The SimMat contains the Tensor of the similarity matrix values, and labels for the Rows and Columns. - -The `etview` package provides a `SimMatGrid` widget that displays the SimMat with the labels. - diff --git a/tensor/stats/simat/doc.go b/tensor/stats/simat/doc.go deleted file mode 100644 index fcc4953bac..0000000000 --- a/tensor/stats/simat/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package simat provides similarity / distance matrix functions that create -a SimMat matrix from Tensor or Table data. Any metric function defined -in metric package (or user-created) can be used. - -The SimMat contains the Tensor of the similarity matrix values, and -labels for the Rows and Columns. - -The etview package provides a SimMatGrid widget that displays the SimMat -with the labels. 
-*/ -package simat diff --git a/tensor/stats/simat/simat.go b/tensor/stats/simat/simat.go deleted file mode 100644 index 49f4f06b0d..0000000000 --- a/tensor/stats/simat/simat.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package simat - -import ( - "fmt" - - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/stats/metric" - "cogentcore.org/core/tensor/table" -) - -// SimMat is a similarity / distance matrix with additional row and column -// labels for display purposes. -type SimMat struct { - - // the similarity / distance matrix (typically an tensor.Float64) - Mat tensor.Tensor - - // labels for the rows -- blank rows trigger generation of grouping lines - Rows []string - - // labels for the columns -- blank columns trigger generation of grouping lines - Columns []string -} - -// NewSimMat returns a new SimMat similarity matrix -func NewSimMat() *SimMat { - return &SimMat{} -} - -// Init initializes SimMat with default Matrix and nil rows, cols -func (smat *SimMat) Init() { - smat.Mat = &tensor.Float64{} - smat.Mat.SetMetaData("grid-fill", "1") // best for sim mats -- can override later if need to - smat.Rows = nil - smat.Columns = nil -} - -// TableColumnStd generates a similarity / distance matrix from given column name -// in given IndexView of an table.Table, and given standard metric function. -// if labNm is not empty, uses given column name for labels, which if blankRepeat -// is true are filtered so that any sequentially repeated labels are blank. -// This Std version is usable e.g., in Python where the func cannot be passed. -func (smat *SimMat) TableColumnStd(ix *table.IndexView, column, labNm string, blankRepeat bool, met metric.StdMetrics) error { - return smat.TableColumn(ix, column, labNm, blankRepeat, metric.StdFunc64(met)) -} - -// TableColumn generates a similarity / distance matrix from given column name -// in given IndexView of an table.Table, and given metric function. -// if labNm is not empty, uses given column name for labels, which if blankRepeat -// is true are filtered so that any sequentially repeated labels are blank. 
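SimMat.TableColumn, documented below, follows the same lower-triangle-then-mirror scheme as the covariance code, but compares whole rows against each other rather than cells across rows, yielding a rows x rows distance matrix. A stripped-down plain-Go sketch of that pattern with a Euclidean metric; all names are hypothetical:

package main

import (
	"fmt"
	"math"
)

func euclidean(a, b []float64) float64 {
	sum := 0.0
	for i := range a {
		d := a[i] - b[i]
		sum += d * d
	}
	return math.Sqrt(sum)
}

// simMatrix returns the rows x rows matrix of pairwise metric values between
// row vectors, filling the lower triangle and mirroring it (symmetric metric).
func simMatrix(rows [][]float64, metric func(a, b []float64) float64) [][]float64 {
	n := len(rows)
	sm := make([][]float64, n)
	for i := range sm {
		sm[i] = make([]float64, n)
	}
	for ai := 0; ai < n; ai++ {
		for bi := 0; bi <= ai; bi++ {
			v := metric(rows[ai], rows[bi])
			sm[ai][bi], sm[bi][ai] = v, v
		}
	}
	return sm
}

func main() {
	rows := [][]float64{{0, 0}, {3, 4}, {6, 8}}
	fmt.Println(simMatrix(rows, euclidean)) // [[0 5 10] [5 0 5] [10 5 0]]
}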
-func (smat *SimMat) TableColumn(ix *table.IndexView, column, labNm string, blankRepeat bool, mfun metric.Func64) error { - col, err := ix.Table.ColumnByName(column) - if err != nil { - return err - } - smat.Init() - sm := smat.Mat - - rows := ix.Len() - nd := col.NumDims() - if nd < 2 || rows == 0 { - return fmt.Errorf("simat.Tensor: must have 2 or more dims and rows != 0") - } - ln := col.Len() - sz := ln / col.DimSize(0) // size of cell - - sshp := []int{rows, rows} - sm.SetShape(sshp) - - av := make([]float64, sz) - bv := make([]float64, sz) - ardim := []int{0} - brdim := []int{0} - sdim := []int{0, 0} - for ai := 0; ai < rows; ai++ { - ardim[0] = ix.Indexes[ai] - sdim[0] = ai - ar := col.SubSpace(ardim) - ar.Floats(&av) - for bi := 0; bi <= ai; bi++ { // lower diag - brdim[0] = ix.Indexes[bi] - sdim[1] = bi - br := col.SubSpace(brdim) - br.Floats(&bv) - sv := mfun(av, bv) - sm.SetFloat(sdim, sv) - } - } - // now fill in upper diagonal with values from lower diagonal - // note: assumes symmetric distance function - fdim := []int{0, 0} - for ai := 0; ai < rows; ai++ { - sdim[0] = ai - fdim[1] = ai - for bi := ai + 1; bi < rows; bi++ { // upper diag - fdim[0] = bi - sdim[1] = bi - sv := sm.Float(fdim) - sm.SetFloat(sdim, sv) - } - } - - if nm, has := ix.Table.MetaData["name"]; has { - sm.SetMetaData("name", nm+"_"+column) - } else { - sm.SetMetaData("name", column) - } - if ds, has := ix.Table.MetaData["desc"]; has { - sm.SetMetaData("desc", ds) - } - - if labNm == "" { - return nil - } - lc, err := ix.Table.ColumnByName(labNm) - if err != nil { - return err - } - smat.Rows = make([]string, rows) - last := "" - for r := 0; r < rows; r++ { - lbl := lc.String1D(ix.Indexes[r]) - if blankRepeat && lbl == last { - continue - } - smat.Rows[r] = lbl - last = lbl - } - smat.Columns = smat.Rows // identical - return nil -} - -// BlankRepeat returns string slice with any sequentially repeated strings blanked out -func BlankRepeat(str []string) []string { - sz := len(str) - br := make([]string, sz) - last := "" - for r, s := range str { - if s == last { - continue - } - br[r] = s - last = s - } - return br -} diff --git a/tensor/stats/simat/simat_test.go b/tensor/stats/simat/simat_test.go deleted file mode 100644 index 103f7ae9a6..0000000000 --- a/tensor/stats/simat/simat_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package simat - -import ( - "testing" - - "cogentcore.org/core/tensor/stats/metric" - "cogentcore.org/core/tensor/table" - - "github.com/stretchr/testify/assert" -) - -var simres = `Tensor: [12, 12] -[0]: 0 3.4641016151377544 8.831760866327848 9.273618495495704 8.717797887081348 9.38083151964686 4.69041575982343 5.830951894845301 8.12403840463596 8.54400374531753 5.291502622129181 6.324555320336759 -[1]: 3.4641016151377544 0 9.38083151964686 8.717797887081348 9.273618495495704 8.831760866327848 5.830951894845301 4.69041575982343 8.717797887081348 7.937253933193772 6.324555320336759 5.291502622129181 -[2]: 8.831760866327848 9.38083151964686 0 3.4641016151377544 4.242640687119285 5.0990195135927845 9.38083151964686 9.899494936611665 4.47213595499958 5.744562646538029 9.38083151964686 9.899494936611665 -[3]: 9.273618495495704 8.717797887081348 3.4641016151377544 0 5.477225575051661 3.7416573867739413 9.797958971132712 9.273618495495704 5.656854249492381 4.58257569495584 9.797958971132712 9.273618495495704 -[4]: 8.717797887081348 9.273618495495704 4.242640687119285 5.477225575051661 0 4 8.831760866327848 9.38083151964686 4.242640687119285 5.5677643628300215 8.831760866327848 9.38083151964686 -[5]: 9.38083151964686 8.831760866327848 5.0990195135927845 3.7416573867739413 4 0 9.486832980505138 8.94427190999916 5.830951894845301 4.795831523312719 9.486832980505138 8.94427190999916 -[6]: 4.69041575982343 5.830951894845301 9.38083151964686 9.797958971132712 8.831760866327848 9.486832980505138 0 3.4641016151377544 9.16515138991168 9.539392014169456 4.242640687119285 5.477225575051661 -[7]: 5.830951894845301 4.69041575982343 9.899494936611665 9.273618495495704 9.38083151964686 8.94427190999916 3.4641016151377544 0 9.695359714832659 9 5.477225575051661 4.242640687119285 -[8]: 8.12403840463596 8.717797887081348 4.47213595499958 5.656854249492381 4.242640687119285 5.830951894845301 9.16515138991168 9.695359714832659 0 3.605551275463989 9.16515138991168 9.695359714832659 -[9]: 8.54400374531753 7.937253933193772 5.744562646538029 4.58257569495584 5.5677643628300215 4.795831523312719 9.539392014169456 9 3.605551275463989 0 9.539392014169456 9 -[10]: 5.291502622129181 6.324555320336759 9.38083151964686 9.797958971132712 8.831760866327848 9.486832980505138 4.242640687119285 5.477225575051661 9.16515138991168 9.539392014169456 0 3.4641016151377544 -[11]: 6.324555320336759 5.291502622129181 9.899494936611665 9.273618495495704 9.38083151964686 8.94427190999916 5.477225575051661 4.242640687119285 9.695359714832659 9 3.4641016151377544 0 -` - -func TestSimMat(t *testing.T) { - dt := &table.Table{} - err := dt.OpenCSV("../clust/testdata/faces.dat", table.Tab) - if err != nil { - t.Error(err) - } - ix := table.NewIndexView(dt) - smat := &SimMat{} - smat.TableColumn(ix, "Input", "Name", false, metric.Euclidean64) - - // fmt.Println(smat.Mat) - assert.Equal(t, simres, smat.Mat.String()) -} diff --git a/tensor/stats/simat/tensor.go b/tensor/stats/simat/tensor.go deleted file mode 100644 index fc6d2c0e8a..0000000000 --- a/tensor/stats/simat/tensor.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package simat - -import ( - "fmt" - - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/stats/metric" -) - -// Tensor computes a similarity / distance matrix on tensor -// using given metric function. 
Outer-most dimension ("rows") is -// used as "indexical" dimension and all other dimensions within that -// are compared. -// Results go in smat which is ensured to have proper square 2D shape -// (rows * rows). -func Tensor(smat tensor.Tensor, a tensor.Tensor, mfun metric.Func64) error { - rows := a.DimSize(0) - nd := a.NumDims() - if nd < 2 || rows == 0 { - return fmt.Errorf("simat.Tensor: must have 2 or more dims and rows != 0") - } - ln := a.Len() - sz := ln / rows - - sshp := []int{rows, rows} - smat.SetShape(sshp) - - av := make([]float64, sz) - bv := make([]float64, sz) - ardim := []int{0} - brdim := []int{0} - sdim := []int{0, 0} - for ai := 0; ai < rows; ai++ { - ardim[0] = ai - sdim[0] = ai - ar := a.SubSpace(ardim) - ar.Floats(&av) - for bi := 0; bi <= ai; bi++ { // lower diag - brdim[0] = bi - sdim[1] = bi - br := a.SubSpace(brdim) - br.Floats(&bv) - sv := mfun(av, bv) - smat.SetFloat(sdim, sv) - } - } - // now fill in upper diagonal with values from lower diagonal - // note: assumes symmetric distance function - fdim := []int{0, 0} - for ai := 0; ai < rows; ai++ { - sdim[0] = ai - fdim[1] = ai - for bi := ai + 1; bi < rows; bi++ { // upper diag - fdim[0] = bi - sdim[1] = bi - sv := smat.Float(fdim) - smat.SetFloat(sdim, sv) - } - } - return nil -} - -// Tensors computes a similarity / distance matrix on two tensors -// using given metric function. Outer-most dimension ("rows") is -// used as "indexical" dimension and all other dimensions within that -// are compared. Resulting reduced 2D shape of two tensors must be -// the same (returns error if not). -// Rows of smat = a, cols = b -func Tensors(smat tensor.Tensor, a, b tensor.Tensor, mfun metric.Func64) error { - arows := a.DimSize(0) - and := a.NumDims() - brows := b.DimSize(0) - bnd := b.NumDims() - if and < 2 || bnd < 2 || arows == 0 || brows == 0 { - return fmt.Errorf("simat.Tensors: must have 2 or more dims and rows != 0") - } - alen := a.Len() - asz := alen / arows - blen := b.Len() - bsz := blen / brows - if asz != bsz { - return fmt.Errorf("simat.Tensors: size of inner dimensions must be same") - } - - sshp := []int{arows, brows} - smat.SetShape(sshp, "a", "b") - - av := make([]float64, asz) - bv := make([]float64, bsz) - ardim := []int{0} - brdim := []int{0} - sdim := []int{0, 0} - for ai := 0; ai < arows; ai++ { - ardim[0] = ai - sdim[0] = ai - ar := a.SubSpace(ardim) - ar.Floats(&av) - for bi := 0; bi < brows; bi++ { - brdim[0] = bi - sdim[1] = bi - br := b.SubSpace(brdim) - br.Floats(&bv) - sv := mfun(av, bv) - smat.SetFloat(sdim, sv) - } - } - return nil -} - -// TensorStd computes a similarity / distance matrix on tensor -// using given Std metric function. Outer-most dimension ("rows") is -// used as "indexical" dimension and all other dimensions within that -// are compared. -// Results go in smat which is ensured to have proper square 2D shape -// (rows * rows). -// This Std version is usable e.g., in Python where the func cannot be passed. -func TensorStd(smat tensor.Tensor, a tensor.Tensor, met metric.StdMetrics) error { - return Tensor(smat, a, metric.StdFunc64(met)) -} - -// TensorsStd computes a similarity / distance matrix on two tensors -// using given Std metric function. Outer-most dimension ("rows") is -// used as "indexical" dimension and all other dimensions within that -// are compared. Resulting reduced 2D shape of two tensors must be -// the same (returns error if not). -// Rows of smat = a, cols = b -// This Std version is usable e.g., in Python where the func cannot be passed. 
-func TensorsStd(smat tensor.Tensor, a, b tensor.Tensor, met metric.StdMetrics) error { - return Tensors(smat, a, b, metric.StdFunc64(met)) -} diff --git a/tensor/stats/split/README.md b/tensor/stats/split/README.md deleted file mode 100644 index 6d6cdfde63..0000000000 --- a/tensor/stats/split/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# split - -`split` provides `GroupBy`, `Agg`, `Permute` and other functions that create and populate Splits of `table.Table` data. These are powerful tools for quickly summarizing and analyzing data. - - diff --git a/tensor/stats/split/agg.go b/tensor/stats/split/agg.go deleted file mode 100644 index cea87595be..0000000000 --- a/tensor/stats/split/agg.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package split - -import ( - "fmt" - - "cogentcore.org/core/tensor/stats/stats" - "cogentcore.org/core/tensor/table" -) - -// AggIndex performs aggregation using given standard statistic (e.g., Mean) across -// all splits, and returns the SplitAgg container of the results, which are also -// stored in the Splits. Column is specified by index. -func AggIndex(spl *table.Splits, colIndex int, stat stats.Stats) *table.SplitAgg { - ag := spl.AddAgg(stat.String(), colIndex) - for _, sp := range spl.Splits { - agv := stats.StatIndex(sp, colIndex, stat) - ag.Aggs = append(ag.Aggs, agv) - } - return ag -} - -// AggColumn performs aggregation using given standard statistic (e.g., Mean) across -// all splits, and returns the SplitAgg container of the results, which are also -// stored in the Splits. Column is specified by name; returns error for bad column name. -func AggColumn(spl *table.Splits, column string, stat stats.Stats) (*table.SplitAgg, error) { - dt := spl.Table() - if dt == nil { - return nil, fmt.Errorf("split.AggTry: No splits to aggregate over") - } - colIndex, err := dt.ColumnIndex(column) - if err != nil { - return nil, err - } - return AggIndex(spl, colIndex, stat), nil -} - -// AggAllNumericColumns performs aggregation using given standard aggregation function across -// all splits, for all number-valued columns in the table. -func AggAllNumericColumns(spl *table.Splits, stat stats.Stats) { - dt := spl.Table() - for ci, cl := range dt.Columns { - if cl.IsString() { - continue - } - AggIndex(spl, ci, stat) - } -} - -/////////////////////////////////////////////////// -// Desc - -// DescIndex performs aggregation using standard statistics across -// all splits, and stores results in the Splits. Column is specified by index. -func DescIndex(spl *table.Splits, colIndex int) { - dt := spl.Table() - if dt == nil { - return - } - col := dt.Columns[colIndex] - sts := stats.DescStats - if col.NumDims() > 1 { // nd cannot do qiles - sts = stats.DescStatsND - } - for _, st := range sts { - AggIndex(spl, colIndex, st) - } -} - -// DescColumn performs aggregation using standard statistics across -// all splits, and stores results in the Splits. -// Column is specified by name; returns error for bad column name. 
-func DescColumn(spl *table.Splits, column string) error { - dt := spl.Table() - if dt == nil { - return fmt.Errorf("split.DescTry: No splits to aggregate over") - } - colIndex, err := dt.ColumnIndex(column) - if err != nil { - return err - } - DescIndex(spl, colIndex) - return nil -} diff --git a/tensor/stats/split/agg_test.go b/tensor/stats/split/agg_test.go deleted file mode 100644 index e9fae284c7..0000000000 --- a/tensor/stats/split/agg_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package split - -import ( - "testing" - - "cogentcore.org/core/tensor/stats/stats" - "cogentcore.org/core/tensor/table" - - "github.com/stretchr/testify/assert" -) - -func TestAgg(t *testing.T) { - dt := table.NewTable().SetNumRows(4) - dt.AddStringColumn("Group") - dt.AddFloat32Column("Value") - for i := 0; i < dt.Rows; i++ { - gp := "A" - if i >= 2 { - gp = "B" - } - dt.SetString("Group", i, gp) - dt.SetFloat("Value", i, float64(i)) - } - ix := table.NewIndexView(dt) - spl := GroupBy(ix, "Group") - assert.Equal(t, 2, len(spl.Splits)) - - AggColumn(spl, "Value", stats.Mean) - - st := spl.AggsToTable(table.ColumnNameOnly) - assert.Equal(t, 0.5, st.Float("Value", 0)) - assert.Equal(t, 2.5, st.Float("Value", 1)) - assert.Equal(t, "A", st.StringValue("Group", 0)) - assert.Equal(t, "B", st.StringValue("Group", 1)) -} - -func TestAggEmpty(t *testing.T) { - dt := table.NewTable().SetNumRows(4) - dt.AddStringColumn("Group") - dt.AddFloat32Column("Value") - for i := 0; i < dt.Rows; i++ { - gp := "A" - if i >= 2 { - gp = "B" - } - dt.SetString("Group", i, gp) - dt.SetFloat("Value", i, float64(i)) - } - ix := table.NewIndexView(dt) - ix.Filter(func(et *table.Table, row int) bool { - return false // exclude all - }) - spl := GroupBy(ix, "Group") - assert.Equal(t, 1, len(spl.Splits)) - - AggColumn(spl, "Value", stats.Mean) - - st := spl.AggsToTable(table.ColumnNameOnly) - if st == nil { - t.Error("AggsToTable should not be nil!") - } -} diff --git a/tensor/stats/split/doc.go b/tensor/stats/split/doc.go deleted file mode 100644 index 5af6a8cb87..0000000000 --- a/tensor/stats/split/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package split provides GroupBy, Agg, Permute and other functions that -create and populate Splits of table.Table data. These are powerful -tools for quickly summarizing and analyzing data. -*/ -package split diff --git a/tensor/stats/split/group.go b/tensor/stats/split/group.go deleted file mode 100644 index 3a8259fdef..0000000000 --- a/tensor/stats/split/group.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package split - -//go:generate core generate - -import ( - "log" - "slices" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/tensor/table" -) - -// All returns a single "split" with all of the rows in given view -// useful for leveraging the aggregation management functions in splits -func All(ix *table.IndexView) *table.Splits { - spl := &table.Splits{} - spl.Levels = []string{"All"} - spl.New(ix.Table, []string{"All"}, ix.Indexes...) 
- return spl -} - -// GroupByIndex returns a new Splits set based on the groups of values -// across the given set of column indexes. -// Uses a stable sort on columns, so ordering of other dimensions is preserved. -func GroupByIndex(ix *table.IndexView, colIndexes []int) *table.Splits { - nc := len(colIndexes) - if nc == 0 || ix.Table == nil { - return nil - } - if ix.Table.ColumnNames == nil { - log.Println("split.GroupBy: Table does not have any column names -- will not work") - return nil - } - spl := &table.Splits{} - spl.Levels = make([]string, nc) - for i, ci := range colIndexes { - spl.Levels[i] = ix.Table.ColumnNames[ci] - } - srt := ix.Clone() - srt.SortStableColumns(colIndexes, true) // important for consistency - lstValues := make([]string, nc) - curValues := make([]string, nc) - var curIx *table.IndexView - for _, rw := range srt.Indexes { - diff := false - for i, ci := range colIndexes { - cl := ix.Table.Columns[ci] - cv := cl.String1D(rw) - curValues[i] = cv - if cv != lstValues[i] { - diff = true - } - } - if diff || curIx == nil { - curIx = spl.New(ix.Table, curValues, rw) - copy(lstValues, curValues) - } else { - curIx.AddIndex(rw) - } - } - if spl.Len() == 0 { // prevent crashing from subsequent ops: add an empty split - spl.New(ix.Table, curValues) // no rows added here - } - return spl -} - -// GroupBy returns a new Splits set based on the groups of values -// across the given set of column names. -// Uses a stable sort on columns, so ordering of other dimensions is preserved. -func GroupBy(ix *table.IndexView, columns ...string) *table.Splits { - return GroupByIndex(ix, errors.Log1(ix.Table.ColumnIndexesByNames(columns...))) -} - -// GroupByFunc returns a new Splits set based on the given function -// which returns value(s) to group on for each row of the table. -// The function should always return the same number of values -- if -// it doesn't behavior is undefined. -// Uses a stable sort on columns, so ordering of other dimensions is preserved. -func GroupByFunc(ix *table.IndexView, fun func(row int) []string) *table.Splits { - if ix.Table == nil { - return nil - } - - // save function values - funvals := make(map[int][]string, ix.Len()) - nv := 0 // number of valeus - for _, rw := range ix.Indexes { - sv := fun(rw) - if nv == 0 { - nv = len(sv) - } - funvals[rw] = slices.Clone(sv) - } - - srt := ix.Clone() - srt.SortStable(func(et *table.Table, i, j int) bool { // sort based on given function values - fvi := funvals[i] - fvj := funvals[j] - for fi := 0; fi < nv; fi++ { - if fvi[fi] < fvj[fi] { - return true - } else if fvi[fi] > fvj[fi] { - return false - } - } - return false - }) - - // now do our usual grouping operation - spl := &table.Splits{} - lstValues := make([]string, nv) - var curIx *table.IndexView - for _, rw := range srt.Indexes { - curValues := funvals[rw] - diff := (curIx == nil) - if !diff { - for fi := 0; fi < nv; fi++ { - if lstValues[fi] != curValues[fi] { - diff = true - break - } - } - } - if diff { - curIx = spl.New(ix.Table, curValues, rw) - copy(lstValues, curValues) - } else { - curIx.AddIndex(rw) - } - } - return spl -} diff --git a/tensor/stats/split/random.go b/tensor/stats/split/random.go deleted file mode 100644 index 4099d22e18..0000000000 --- a/tensor/stats/split/random.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
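GroupByFunc has no usage example in the removed tests; a minimal sketch follows, with made-up group keys ("low", "high") and an illustrative "Value" column, and the split import path inferred from the deleted file's location.

package main

import (
	"fmt"

	"cogentcore.org/core/tensor/stats/split"
	"cogentcore.org/core/tensor/stats/stats"
	"cogentcore.org/core/tensor/table"
)

func main() {
	dt := table.NewTable().SetNumRows(4)
	dt.AddFloat32Column("Value")
	for i := 0; i < dt.Rows; i++ {
		dt.SetFloat("Value", i, float64(i))
	}
	ix := table.NewIndexView(dt)

	// group rows by a computed key instead of a column value
	spl := split.GroupByFunc(ix, func(row int) []string {
		if dt.Float("Value", row) < 2 {
			return []string{"low"}
		}
		return []string{"high"}
	})
	fmt.Println(len(spl.Splits)) // 2 groups

	ag, _ := split.AggColumn(spl, "Value", stats.Mean)
	fmt.Println(ag.Aggs) // one mean value per group
}
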
- -package split - -import ( - "fmt" - "math" - - "cogentcore.org/core/tensor/table" - "gonum.org/v1/gonum/floats" -) - -// Permuted generates permuted random splits of table rows, using given list of probabilities, -// which will be normalized to sum to 1 (error returned if sum = 0) -// names are optional names for each split (e.g., Train, Test) which will be -// used to label the Values of the resulting Splits. -func Permuted(ix *table.IndexView, probs []float64, names []string) (*table.Splits, error) { - if ix == nil || ix.Len() == 0 { - return nil, fmt.Errorf("split.Random table is nil / empty") - } - np := len(probs) - if len(names) > 0 && len(names) != np { - return nil, fmt.Errorf("split.Random names not same len as probs") - } - sum := floats.Sum(probs) - if sum == 0 { - return nil, fmt.Errorf("split.Random probs sum to 0") - } - nr := ix.Len() - ns := make([]int, np) - cum := 0 - fnr := float64(nr) - for i, p := range probs { - p /= sum - per := int(math.Round(p * fnr)) - if cum+per > nr { - per = nr - cum - if per <= 0 { - break - } - } - ns[i] = per - cum += per - } - spl := &table.Splits{} - perm := ix.Clone() - perm.Permuted() - cum = 0 - spl.SetLevels("permuted") - for i, n := range ns { - nm := "" - if names != nil { - nm = names[i] - } else { - nm = fmt.Sprintf("p=%v", probs[i]/sum) - } - spl.New(ix.Table, []string{nm}, perm.Indexes[cum:cum+n]...) - cum += n - } - return spl, nil -} diff --git a/tensor/stats/split/random_test.go b/tensor/stats/split/random_test.go deleted file mode 100644 index 507ebd77d5..0000000000 --- a/tensor/stats/split/random_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package split - -import ( - "testing" - - "cogentcore.org/core/tensor/table" - - "github.com/stretchr/testify/assert" -) - -func TestPermuted(t *testing.T) { - dt := table.NewTable().SetNumRows(25) - dt.AddStringColumn("Name") - dt.AddFloat32TensorColumn("Input", []int{5, 5}, "Y", "X") - dt.AddFloat32TensorColumn("Output", []int{5, 5}, "Y", "X") - ix := table.NewIndexView(dt) - spl, err := Permuted(ix, []float64{.5, .5}, nil) - if err != nil { - t.Error(err) - } - // for i, sp := range spl.Splits { - // fmt.Printf("split: %v name: %v len: %v idxs: %v\n", i, spl.Values[i], len(sp.Indexes), sp.Indexes) - // } - assert.Equal(t, 2, len(spl.Splits)) - assert.Contains(t, []int{12, 13}, len(spl.Splits[0].Indexes)) - assert.Contains(t, []int{12, 13}, len(spl.Splits[1].Indexes)) - - spl, err = Permuted(ix, []float64{.25, .5, .25}, []string{"test", "train", "validate"}) - if err != nil { - t.Error(err) - } - // for i, sp := range spl.Splits { - // fmt.Printf("split: %v name: %v len: %v idxs: %v\n", i, spl.Values[i], len(sp.Indexes), sp.Indexes) - // } - assert.Equal(t, 3, len(spl.Splits)) - assert.Equal(t, 6, len(spl.Splits[0].Indexes)) - assert.Equal(t, 13, len(spl.Splits[1].Indexes)) - assert.Equal(t, 6, len(spl.Splits[2].Indexes)) -} diff --git a/tensor/stats/stats/README.md b/tensor/stats/stats/README.md deleted file mode 100644 index 5225e42383..0000000000 --- a/tensor/stats/stats/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# stats - -The `stats` package provides standard statistic computations operating over floating-point data (both 32 and 64 bit) in the following formats. Each statistic returns a single scalar value summarizing the data in a different way. 
Some formats also support multi-dimensional tensor data, returning a summary stat for each tensor value, using the outer-most ("row-wise") dimension to summarize over. - -* `[]float32` and `[]float64` slices, as e.g., `Mean32` and `Mean64`, skipping any `NaN` values as missing data. - -* `tensor.Float32`, `tensor.Float64` using the underlying `Values` slice, and other generic `Tensor` using the `Floats` interface (less efficient). - -* `table.IndexView` indexed views of `table.Table` data, with `*Column` functions (e.g., `MeanColumn`) using names to specify columns, and `*Index` versions operating on column indexes. Also available for this type are `CountIf*`, `PctIf*`, `PropIf*` functions that return count, percentage, or propoprtion of values according to given function. - -## Stats - -The following statistics are supported (per the `Stats` enum in `stats.go`): - -* `Count`: count of number of elements -* `Sum`: sum of elements -* `Prod`: product of elements -* `Min`: minimum value -* `Max`: max maximum value -* `MinAbs`: minimum absolute value -* `MaxAbs`: maximum absolute value -* `Mean`: mean mean value -* `Var`: sample variance (squared diffs from mean, divided by n-1) -* `Std`: sample standard deviation (sqrt of Var) -* `Sem`: sample standard error of the mean (Std divided by sqrt(n)) -* `L1Norm`: L1 Norm: sum of absolute values -* `SumSq`: sum of squared element values -* `L2Norm`: L2 Norm: square-root of sum-of-squares -* `VarPop`: population variance (squared diffs from mean, divided by n) -* `StdPop`: population standard deviation (sqrt of VarPop) -* `SemPop`: population standard error of the mean (StdPop divided by sqrt(n)) -* `Median`: middle value in sorted ordering (only for IndexView) -* `Q1`: Q1 first quartile = 25%ile value = .25 quantile value (only for IndexView) -* `Q3`: Q3 third quartile = 75%ile value = .75 quantile value (only for IndexView) - - diff --git a/tensor/stats/stats/desc.go b/tensor/stats/stats/desc.go deleted file mode 100644 index e6795cef8c..0000000000 --- a/tensor/stats/stats/desc.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package stats - -import ( - "cogentcore.org/core/tensor/table" -) - -// DescStats are all the standard stats -var DescStats = []Stats{Count, Mean, Std, Sem, Min, Max, Q1, Median, Q3} - -// DescStatsND are all the standard stats for n-dimensional (n > 1) data -- cannot do quantiles -var DescStatsND = []Stats{Count, Mean, Std, Sem, Min, Max} - -// DescAll returns a table of standard descriptive stats for -// all numeric columns in given table, operating over all non-Null, non-NaN elements -// in each column. -func DescAll(ix *table.IndexView) *table.Table { - st := ix.Table - nAgg := len(DescStats) - dt := table.NewTable().SetNumRows(nAgg) - dt.AddStringColumn("Stat") - for ci := range st.Columns { - col := st.Columns[ci] - if col.IsString() { - continue - } - dt.AddFloat64TensorColumn(st.ColumnNames[ci], col.Shape().Sizes[1:], col.Shape().Names[1:]...) 
- } - dtnm := dt.Columns[0] - dtci := 1 - qs := []float64{.25, .5, .75} - sq := len(DescStatsND) - for ci := range st.Columns { - col := st.Columns[ci] - if col.IsString() { - continue - } - _, csz := col.RowCellSize() - dtst := dt.Columns[dtci] - for i, styp := range DescStatsND { - ag := StatIndex(ix, ci, styp) - si := i * csz - for j := 0; j < csz; j++ { - dtst.SetFloat1D(si+j, ag[j]) - } - if dtci == 1 { - dtnm.SetString1D(i, styp.String()) - } - } - if col.NumDims() == 1 { - qvs := QuantilesIndex(ix, ci, qs) - for i, qv := range qvs { - dtst.SetFloat1D(sq+i, qv) - dtnm.SetString1D(sq+i, DescStats[sq+i].String()) - } - } - dtci++ - } - return dt -} - -// DescIndex returns a table of standard descriptive aggregates -// of non-Null, non-NaN elements in given IndexView indexed view of an -// table.Table, for given column index. -func DescIndex(ix *table.IndexView, colIndex int) *table.Table { - st := ix.Table - col := st.Columns[colIndex] - stats := DescStats - if col.NumDims() > 1 { // nd cannot do qiles - stats = DescStatsND - } - nAgg := len(stats) - dt := table.NewTable().SetNumRows(nAgg) - dt.AddStringColumn("Stat") - dt.AddFloat64TensorColumn(st.ColumnNames[colIndex], col.Shape().Sizes[1:], col.Shape().Names[1:]...) - dtnm := dt.Columns[0] - dtst := dt.Columns[1] - _, csz := col.RowCellSize() - for i, styp := range DescStatsND { - ag := StatIndex(ix, colIndex, styp) - si := i * csz - for j := 0; j < csz; j++ { - dtst.SetFloat1D(si+j, ag[j]) - } - dtnm.SetString1D(i, styp.String()) - } - if col.NumDims() == 1 { - sq := len(DescStatsND) - qs := []float64{.25, .5, .75} - qvs := QuantilesIndex(ix, colIndex, qs) - for i, qv := range qvs { - dtst.SetFloat1D(sq+i, qv) - dtnm.SetString1D(sq+i, DescStats[sq+i].String()) - } - } - return dt -} - -// DescColumn returns a table of standard descriptive stats -// of non-NaN elements in given IndexView indexed view of an -// table.Table, for given column name. -// If name not found, returns error message. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func DescColumn(ix *table.IndexView, column string) (*table.Table, error) { - colIndex, err := ix.Table.ColumnIndex(column) - if err != nil { - return nil, err - } - return DescIndex(ix, colIndex), nil -} diff --git a/tensor/stats/stats/doc.go b/tensor/stats/stats/doc.go deleted file mode 100644 index e65f1ce9da..0000000000 --- a/tensor/stats/stats/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package agg provides aggregation functions operating on IndexView indexed views -of table.Table data, along with standard AggFunc functions that can be used -at any level of aggregation from tensor on up. - -The main functions use names to specify columns, and *Index and *Try versions -are available that operate on column indexes and return errors, respectively. - -See tsragg package for functions that operate directly on a tensor.Tensor -without the indexview indirection. -*/ -package stats diff --git a/tensor/stats/stats/enumgen.go b/tensor/stats/stats/enumgen.go deleted file mode 100644 index 7a760c668e..0000000000 --- a/tensor/stats/stats/enumgen.go +++ /dev/null @@ -1,46 +0,0 @@ -// Code generated by "core generate"; DO NOT EDIT. 
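A minimal sketch of driving the DescAll and DescColumn entry points defined in desc.go above; the table contents and column name are illustrative.

package main

import (
	"fmt"

	"cogentcore.org/core/tensor/stats/stats"
	"cogentcore.org/core/tensor/table"
)

func main() {
	dt := table.NewTable().SetNumRows(10)
	dt.AddFloat32Column("Value")
	for i := 0; i < dt.Rows; i++ {
		dt.SetFloat("Value", i, float64(i))
	}
	ix := table.NewIndexView(dt)

	// one row per statistic in DescStats, one column per numeric column
	fmt.Println(stats.DescAll(ix))

	// same for a single named column; errors on a bad column name
	dv, err := stats.DescColumn(ix, "Value")
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println(dv)
}
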
- -package stats - -import ( - "cogentcore.org/core/enums" -) - -var _StatsValues = []Stats{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19} - -// StatsN is the highest valid value for type Stats, plus one. -const StatsN Stats = 20 - -var _StatsValueMap = map[string]Stats{`Count`: 0, `Sum`: 1, `Prod`: 2, `Min`: 3, `Max`: 4, `MinAbs`: 5, `MaxAbs`: 6, `Mean`: 7, `Var`: 8, `Std`: 9, `Sem`: 10, `L1Norm`: 11, `SumSq`: 12, `L2Norm`: 13, `VarPop`: 14, `StdPop`: 15, `SemPop`: 16, `Median`: 17, `Q1`: 18, `Q3`: 19} - -var _StatsDescMap = map[Stats]string{0: `count of number of elements`, 1: `sum of elements`, 2: `product of elements`, 3: `minimum value`, 4: `max maximum value`, 5: `minimum absolute value`, 6: `maximum absolute value`, 7: `mean mean value`, 8: `sample variance (squared diffs from mean, divided by n-1)`, 9: `sample standard deviation (sqrt of Var)`, 10: `sample standard error of the mean (Std divided by sqrt(n))`, 11: `L1 Norm: sum of absolute values`, 12: `sum of squared values`, 13: `L2 Norm: square-root of sum-of-squares`, 14: `population variance (squared diffs from mean, divided by n)`, 15: `population standard deviation (sqrt of VarPop)`, 16: `population standard error of the mean (StdPop divided by sqrt(n))`, 17: `middle value in sorted ordering`, 18: `Q1 first quartile = 25%ile value = .25 quantile value`, 19: `Q3 third quartile = 75%ile value = .75 quantile value`} - -var _StatsMap = map[Stats]string{0: `Count`, 1: `Sum`, 2: `Prod`, 3: `Min`, 4: `Max`, 5: `MinAbs`, 6: `MaxAbs`, 7: `Mean`, 8: `Var`, 9: `Std`, 10: `Sem`, 11: `L1Norm`, 12: `SumSq`, 13: `L2Norm`, 14: `VarPop`, 15: `StdPop`, 16: `SemPop`, 17: `Median`, 18: `Q1`, 19: `Q3`} - -// String returns the string representation of this Stats value. -func (i Stats) String() string { return enums.String(i, _StatsMap) } - -// SetString sets the Stats value from its string representation, -// and returns an error if the string is invalid. -func (i *Stats) SetString(s string) error { return enums.SetString(i, s, _StatsValueMap, "Stats") } - -// Int64 returns the Stats value as an int64. -func (i Stats) Int64() int64 { return int64(i) } - -// SetInt64 sets the Stats value from an int64. -func (i *Stats) SetInt64(in int64) { *i = Stats(in) } - -// Desc returns the description of the Stats value. -func (i Stats) Desc() string { return enums.Desc(i, _StatsDescMap) } - -// StatsValues returns all possible values for the type Stats. -func StatsValues() []Stats { return _StatsValues } - -// Values returns all possible values for the type Stats. -func (i Stats) Values() []enums.Enum { return enums.Values(_StatsValues) } - -// MarshalText implements the [encoding.TextMarshaler] interface. -func (i Stats) MarshalText() ([]byte, error) { return []byte(i.String()), nil } - -// UnmarshalText implements the [encoding.TextUnmarshaler] interface. -func (i *Stats) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Stats") } diff --git a/tensor/stats/stats/floats.go b/tensor/stats/stats/floats.go deleted file mode 100644 index dff87d1152..0000000000 --- a/tensor/stats/stats/floats.go +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package stats - -import ( - "math" - - "cogentcore.org/core/math32" -) - -// Stat32 returns statistic according to given Stats type applied -// to all non-NaN elements in given slice of float32 -func Stat32(a []float32, stat Stats) float32 { - switch stat { - case Count: - return Count32(a) - case Sum: - return Sum32(a) - case Prod: - return Prod32(a) - case Min: - return Min32(a) - case Max: - return Max32(a) - case MinAbs: - return MinAbs32(a) - case MaxAbs: - return MaxAbs32(a) - case Mean: - return Mean32(a) - case Var: - return Var32(a) - case Std: - return Std32(a) - case Sem: - return Sem32(a) - case L1Norm: - return L1Norm32(a) - case SumSq: - return SumSq32(a) - case L2Norm: - return L2Norm32(a) - case VarPop: - return VarPop32(a) - case StdPop: - return StdPop32(a) - case SemPop: - return SemPop32(a) - // case Median: - // return Median32(a) - // case Q1: - // return Q132(a) - // case Q3: - // return Q332(a) - } - return 0 -} - -// Stat64 returns statistic according to given Stats type applied -// to all non-NaN elements in given slice of float64 -func Stat64(a []float64, stat Stats) float64 { - switch stat { - case Count: - return Count64(a) - case Sum: - return Sum64(a) - case Prod: - return Prod64(a) - case Min: - return Min64(a) - case Max: - return Max64(a) - case MinAbs: - return MinAbs64(a) - case MaxAbs: - return MaxAbs64(a) - case Mean: - return Mean64(a) - case Var: - return Var64(a) - case Std: - return Std64(a) - case Sem: - return Sem64(a) - case L1Norm: - return L1Norm64(a) - case SumSq: - return SumSq64(a) - case L2Norm: - return L2Norm64(a) - case VarPop: - return VarPop64(a) - case StdPop: - return StdPop64(a) - case SemPop: - return SemPop64(a) - // case Median: - // return Median64(a) - // case Q1: - // return Q164(a) - // case Q3: - // return Q364(a) - } - return 0 -} - -/////////////////////////////////////////// -// Count - -// Count32 computes the number of non-NaN vector values. -// Skips NaN's -func Count32(a []float32) float32 { - n := 0 - for _, av := range a { - if math32.IsNaN(av) { - continue - } - n++ - } - return float32(n) -} - -// Count64 computes the number of non-NaN vector values. -// Skips NaN's -func Count64(a []float64) float64 { - n := 0 - for _, av := range a { - if math.IsNaN(av) { - continue - } - n++ - } - return float64(n) -} - -/////////////////////////////////////////// -// Sum - -// Sum32 computes the sum of vector values. -// Skips NaN's -func Sum32(a []float32) float32 { - s := float32(0) - for _, av := range a { - if math32.IsNaN(av) { - continue - } - s += av - } - return s -} - -// Sum64 computes the sum of vector values. -// Skips NaN's -func Sum64(a []float64) float64 { - s := float64(0) - for _, av := range a { - if math.IsNaN(av) { - continue - } - s += av - } - return s -} - -/////////////////////////////////////////// -// Prod - -// Prod32 computes the product of vector values. -// Skips NaN's -func Prod32(a []float32) float32 { - s := float32(1) - for _, av := range a { - if math32.IsNaN(av) { - continue - } - s *= av - } - return s -} - -// Prod64 computes the product of vector values. -// Skips NaN's -func Prod64(a []float64) float64 { - s := float64(1) - for _, av := range a { - if math.IsNaN(av) { - continue - } - s *= av - } - return s -} - -/////////////////////////////////////////// -// Min - -// Min32 computes the max over vector values. 
-// Skips NaN's -func Min32(a []float32) float32 { - m := float32(math.MaxFloat32) - for _, av := range a { - if math32.IsNaN(av) { - continue - } - m = math32.Min(m, av) - } - return m -} - -// MinIndex32 computes the min over vector values, and returns index of min as well -// Skips NaN's -func MinIndex32(a []float32) (float32, int) { - m := float32(math.MaxFloat32) - mi := -1 - for i, av := range a { - if math32.IsNaN(av) { - continue - } - if av < m { - m = av - mi = i - } - } - return m, mi -} - -// Min64 computes the max over vector values. -// Skips NaN's -func Min64(a []float64) float64 { - m := float64(math.MaxFloat64) - for _, av := range a { - if math.IsNaN(av) { - continue - } - m = math.Min(m, av) - } - return m -} - -// MinIndex64 computes the min over vector values, and returns index of min as well -// Skips NaN's -func MinIndex64(a []float64) (float64, int) { - m := float64(math.MaxFloat64) - mi := -1 - for i, av := range a { - if math.IsNaN(av) { - continue - } - if av < m { - m = av - mi = i - } - } - return m, mi -} - -/////////////////////////////////////////// -// Max - -// Max32 computes the max over vector values. -// Skips NaN's -func Max32(a []float32) float32 { - m := float32(-math.MaxFloat32) - for _, av := range a { - if math32.IsNaN(av) { - continue - } - m = math32.Max(m, av) - } - return m -} - -// MaxIndex32 computes the max over vector values, and returns index of max as well -// Skips NaN's -func MaxIndex32(a []float32) (float32, int) { - m := float32(-math.MaxFloat32) - mi := -1 - for i, av := range a { - if math32.IsNaN(av) { - continue - } - if av > m { - m = av - mi = i - } - } - return m, mi -} - -// Max64 computes the max over vector values. -// Skips NaN's -func Max64(a []float64) float64 { - m := float64(-math.MaxFloat64) - for _, av := range a { - if math.IsNaN(av) { - continue - } - m = math.Max(m, av) - } - return m -} - -// MaxIndex64 computes the max over vector values, and returns index of max as well -// Skips NaN's -func MaxIndex64(a []float64) (float64, int) { - m := float64(-math.MaxFloat64) - mi := -1 - for i, av := range a { - if math.IsNaN(av) { - continue - } - if av > m { - m = av - mi = i - } - } - return m, mi -} - -/////////////////////////////////////////// -// MinAbs - -// MinAbs32 computes the max of absolute value over vector values. -// Skips NaN's -func MinAbs32(a []float32) float32 { - m := float32(math.MaxFloat32) - for _, av := range a { - if math32.IsNaN(av) { - continue - } - m = math32.Min(m, math32.Abs(av)) - } - return m -} - -// MinAbs64 computes the max over vector values. -// Skips NaN's -func MinAbs64(a []float64) float64 { - m := float64(math.MaxFloat64) - for _, av := range a { - if math.IsNaN(av) { - continue - } - m = math.Min(m, math.Abs(av)) - } - return m -} - -/////////////////////////////////////////// -// MaxAbs - -// MaxAbs32 computes the max of absolute value over vector values. -// Skips NaN's -func MaxAbs32(a []float32) float32 { - m := float32(0) - for _, av := range a { - if math32.IsNaN(av) { - continue - } - m = math32.Max(m, math32.Abs(av)) - } - return m -} - -// MaxAbs64 computes the max over vector values. -// Skips NaN's -func MaxAbs64(a []float64) float64 { - m := float64(0) - for _, av := range a { - if math.IsNaN(av) { - continue - } - m = math.Max(m, math.Abs(av)) - } - return m -} - -/////////////////////////////////////////// -// Mean - -// Mean32 computes the mean of the vector (sum / N). 
-// Skips NaN's -func Mean32(a []float32) float32 { - s := float32(0) - n := 0 - for _, av := range a { - if math32.IsNaN(av) { - continue - } - s += av - n++ - } - if n > 0 { - s /= float32(n) - } - return s -} - -// Mean64 computes the mean of the vector (sum / N). -// Skips NaN's -func Mean64(a []float64) float64 { - s := float64(0) - n := 0 - for _, av := range a { - if math.IsNaN(av) { - continue - } - s += av - n++ - } - if n > 0 { - s /= float64(n) - } - return s -} - -/////////////////////////////////////////// -// Var - -// Var32 returns the sample variance of non-NaN elements. -func Var32(a []float32) float32 { - mean := Mean32(a) - n := 0 - s := float32(0) - for _, av := range a { - if math32.IsNaN(av) { - continue - } - dv := av - mean - s += dv * dv - n++ - } - if n > 1 { - s /= float32(n - 1) - } - return s -} - -// Var64 returns the sample variance of non-NaN elements. -func Var64(a []float64) float64 { - mean := Mean64(a) - n := 0 - s := float64(0) - for _, av := range a { - if math.IsNaN(av) { - continue - } - dv := av - mean - s += dv * dv - n++ - } - if n > 1 { - s /= float64(n - 1) - } - return s -} - -/////////////////////////////////////////// -// Std - -// Std32 returns the sample standard deviation of non-NaN elements in vector. -func Std32(a []float32) float32 { - return math32.Sqrt(Var32(a)) -} - -// Std64 returns the sample standard deviation of non-NaN elements in vector. -func Std64(a []float64) float64 { - return math.Sqrt(Var64(a)) -} - -/////////////////////////////////////////// -// Sem - -// Sem32 returns the sample standard error of the mean of non-NaN elements in vector. -func Sem32(a []float32) float32 { - cnt := Count32(a) - if cnt < 2 { - return 0 - } - return Std32(a) / math32.Sqrt(cnt) -} - -// Sem64 returns the sample standard error of the mean of non-NaN elements in vector. -func Sem64(a []float64) float64 { - cnt := Count64(a) - if cnt < 2 { - return 0 - } - return Std64(a) / math.Sqrt(cnt) -} - -/////////////////////////////////////////// -// L1Norm - -// L1Norm32 computes the sum of absolute values (L1 Norm). -// Skips NaN's -func L1Norm32(a []float32) float32 { - ss := float32(0) - for _, av := range a { - if math32.IsNaN(av) { - continue - } - ss += math32.Abs(av) - } - return ss -} - -// L1Norm64 computes the sum of absolute values (L1 Norm). -// Skips NaN's -func L1Norm64(a []float64) float64 { - ss := float64(0) - for _, av := range a { - if math.IsNaN(av) { - continue - } - ss += math.Abs(av) - } - return ss -} - -/////////////////////////////////////////// -// SumSquares - -// SumSq32 computes the sum-of-squares of vector. -// Skips NaN's. Uses optimized algorithm from BLAS that avoids numerical overflow. -func SumSq32(a []float32) float32 { - n := len(a) - if n < 2 { - if n == 1 { - return math32.Abs(a[0]) - } - return 0 - } - var ( - scale float32 = 0 - sumSquares float32 = 1 - ) - for _, v := range a { - if v == 0 || math32.IsNaN(v) { - continue - } - absxi := math32.Abs(v) - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - if math32.IsInf(scale, 1) { - return math32.Inf(1) - } - return scale * scale * sumSquares -} - -// SumSq64 computes the sum-of-squares of vector. -// Skips NaN's. Uses optimized algorithm from BLAS that avoids numerical overflow. 
-func SumSq64(a []float64) float64 { - n := len(a) - if n < 2 { - if n == 1 { - return math.Abs(a[0]) - } - return 0 - } - var ( - scale float64 = 0 - ss float64 = 1 - ) - for _, v := range a { - if v == 0 || math.IsNaN(v) { - continue - } - absxi := math.Abs(v) - if scale < absxi { - ss = 1 + ss*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - ss = ss + (absxi/scale)*(absxi/scale) - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * scale * ss -} - -/////////////////////////////////////////// -// L2Norm - -// L2Norm32 computes the square-root of sum-of-squares of vector, i.e., the L2 norm. -// Skips NaN's. Uses optimized algorithm from BLAS that avoids numerical overflow. -func L2Norm32(a []float32) float32 { - n := len(a) - if n < 2 { - if n == 1 { - return math32.Abs(a[0]) - } - return 0 - } - var ( - scale float32 = 0 - ss float32 = 1 - ) - for _, v := range a { - if v == 0 || math32.IsNaN(v) { - continue - } - absxi := math32.Abs(v) - if scale < absxi { - ss = 1 + ss*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - ss = ss + (absxi/scale)*(absxi/scale) - } - } - if math32.IsInf(scale, 1) { - return math32.Inf(1) - } - return scale * math32.Sqrt(ss) -} - -// L2Norm64 computes the square-root of sum-of-squares of vector, i.e., the L2 norm. -// Skips NaN's. Uses optimized algorithm from BLAS that avoids numerical overflow. -func L2Norm64(a []float64) float64 { - n := len(a) - if n < 2 { - if n == 1 { - return math.Abs(a[0]) - } - return 0 - } - var ( - scale float64 = 0 - ss float64 = 1 - ) - for _, v := range a { - if v == 0 || math.IsNaN(v) { - continue - } - absxi := math.Abs(v) - if scale < absxi { - ss = 1 + ss*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - ss = ss + (absxi/scale)*(absxi/scale) - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * math.Sqrt(ss) -} - -/////////////////////////////////////////// -// VarPop - -// VarPop32 returns the population variance of non-NaN elements. -func VarPop32(a []float32) float32 { - mean := Mean32(a) - n := 0 - s := float32(0) - for _, av := range a { - if math32.IsNaN(av) { - continue - } - dv := av - mean - s += dv * dv - n++ - } - if n > 0 { - s /= float32(n) - } - return s -} - -// VarPop64 returns the population variance of non-NaN elements. -func VarPop64(a []float64) float64 { - mean := Mean64(a) - n := 0 - s := float64(0) - for _, av := range a { - if math.IsNaN(av) { - continue - } - dv := av - mean - s += dv * dv - n++ - } - if n > 0 { - s /= float64(n) - } - return s -} - -/////////////////////////////////////////// -// StdPop - -// StdPop32 returns the population standard deviation of non-NaN elements in vector. -func StdPop32(a []float32) float32 { - return math32.Sqrt(VarPop32(a)) -} - -// StdPop64 returns the population standard deviation of non-NaN elements in vector. -func StdPop64(a []float64) float64 { - return math.Sqrt(VarPop64(a)) -} - -/////////////////////////////////////////// -// SemPop - -// SemPop32 returns the population standard error of the mean of non-NaN elements in vector. -func SemPop32(a []float32) float32 { - cnt := Count32(a) - if cnt < 2 { - return 0 - } - return StdPop32(a) / math32.Sqrt(cnt) -} - -// SemPop64 returns the population standard error of the mean of non-NaN elements in vector. 
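The scale-and-square approach used in SumSq and L2Norm above keeps the L2 norm finite even when the individual squares overflow. A small self-contained illustration (not taken from the removed sources; input values are made up):

package main

import (
	"fmt"
	"math"

	"cogentcore.org/core/tensor/stats/stats"
)

func main() {
	a := []float64{1e200, 1e200}

	// naive two-pass: squaring 1e200 overflows float64, so the naive
	// norm comes out as +Inf
	naive := 0.0
	for _, v := range a {
		naive += v * v
	}
	fmt.Println(math.Sqrt(naive)) // +Inf

	// the scaled computation factors out the largest magnitude first
	// and stays finite
	fmt.Println(stats.L2Norm64(a)) // ~1.4142e+200
}
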
-func SemPop64(a []float64) float64 { - cnt := Count64(a) - if cnt < 2 { - return 0 - } - return StdPop64(a) / math.Sqrt(cnt) -} diff --git a/tensor/stats/stats/floats_test.go b/tensor/stats/stats/floats_test.go deleted file mode 100644 index 2f8dc16d47..0000000000 --- a/tensor/stats/stats/floats_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package stats - -import ( - "math" - "testing" - - "cogentcore.org/core/base/tolassert" - "cogentcore.org/core/math32" - "github.com/stretchr/testify/assert" -) - -func TestStats32(t *testing.T) { - vals := []float32{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1} - - results := []float32{11, 5.5, 0, 0, 1, 0, 1, 0.5, 0.11, math32.Sqrt(0.11), math32.Sqrt(0.11) / math32.Sqrt(11), 5.5, 3.85, math32.Sqrt(3.85), 0.1, math32.Sqrt(0.1), math32.Sqrt(0.1) / math32.Sqrt(11)} - - assert.Equal(t, results[Count], Count32(vals)) - assert.Equal(t, results[Sum], Sum32(vals)) - assert.Equal(t, results[Prod], Prod32(vals)) - assert.Equal(t, results[Min], Min32(vals)) - assert.Equal(t, results[Max], Max32(vals)) - assert.Equal(t, results[MinAbs], MinAbs32(vals)) - assert.Equal(t, results[MaxAbs], MaxAbs32(vals)) - assert.Equal(t, results[Mean], Mean32(vals)) - assert.Equal(t, results[Var], Var32(vals)) - assert.Equal(t, results[Std], Std32(vals)) - assert.Equal(t, results[Sem], Sem32(vals)) - assert.Equal(t, results[L1Norm], L1Norm32(vals)) - tolassert.EqualTol(t, results[SumSq], SumSq32(vals), 1.0e-6) - tolassert.EqualTol(t, results[L2Norm], L2Norm32(vals), 1.0e-6) - assert.Equal(t, results[VarPop], VarPop32(vals)) - assert.Equal(t, results[StdPop], StdPop32(vals)) - assert.Equal(t, results[SemPop], SemPop32(vals)) - - for stat := Count; stat <= SemPop; stat++ { - tolassert.EqualTol(t, results[stat], Stat32(vals, stat), 1.0e-6) - } -} - -func TestStats64(t *testing.T) { - vals := []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1} - - results := []float64{11, 5.5, 0, 0, 1, 0, 1, 0.5, 0.11, math.Sqrt(0.11), math.Sqrt(0.11) / math.Sqrt(11), 5.5, 3.85, math.Sqrt(3.85), 0.1, math.Sqrt(0.1), math.Sqrt(0.1) / math.Sqrt(11)} - - assert.Equal(t, results[Count], Count64(vals)) - assert.Equal(t, results[Sum], Sum64(vals)) - assert.Equal(t, results[Prod], Prod64(vals)) - assert.Equal(t, results[Min], Min64(vals)) - assert.Equal(t, results[Max], Max64(vals)) - assert.Equal(t, results[MinAbs], MinAbs64(vals)) - assert.Equal(t, results[MaxAbs], MaxAbs64(vals)) - assert.Equal(t, results[Mean], Mean64(vals)) - tolassert.EqualTol(t, results[Var], Var64(vals), 1.0e-8) - tolassert.EqualTol(t, results[Std], Std64(vals), 1.0e-8) - tolassert.EqualTol(t, results[Sem], Sem64(vals), 1.0e-8) - assert.Equal(t, results[L1Norm], L1Norm64(vals)) - tolassert.EqualTol(t, results[SumSq], SumSq64(vals), 1.0e-8) - tolassert.EqualTol(t, results[L2Norm], L2Norm64(vals), 1.0e-8) - assert.Equal(t, results[VarPop], VarPop64(vals)) - assert.Equal(t, results[StdPop], StdPop64(vals)) - assert.Equal(t, results[SemPop], SemPop64(vals)) - - for stat := Count; stat <= SemPop; stat++ { - tolassert.EqualTol(t, results[stat], Stat64(vals, stat), 1.0e-8) - } -} diff --git a/tensor/stats/stats/funcs.go b/tensor/stats/stats/funcs.go deleted file mode 100644 index 76f23dacef..0000000000 --- a/tensor/stats/stats/funcs.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package stats - -import "math" - -// These are standard StatFunc functions that can operate on tensor.Tensor -// or table.Table, using float64 values - -// StatFunc is an statistic function that incrementally updates agg -// aggregation value from each element in the tensor in turn. -// Returns new agg value that will be passed into next item as agg. -type StatFunc func(idx int, val float64, agg float64) float64 - -// CountFunc is an StatFunc that computes number of elements (non-Null, non-NaN) -// Use 0 as initial value. -func CountFunc(idx int, val float64, agg float64) float64 { - return agg + 1 -} - -// SumFunc is an StatFunc that computes a sum aggregate. -// use 0 as initial value. -func SumFunc(idx int, val float64, agg float64) float64 { - return agg + val -} - -// Prodfunc is an StatFunc that computes a product aggregate. -// use 1 as initial value. -func ProdFunc(idx int, val float64, agg float64) float64 { - return agg * val -} - -// MinFunc is an StatFunc that computes a min aggregate. -// use math.MaxFloat64 for initial agg value. -func MinFunc(idx int, val float64, agg float64) float64 { - return math.Min(agg, val) -} - -// MaxFunc is an StatFunc that computes a max aggregate. -// use -math.MaxFloat64 for initial agg value. -func MaxFunc(idx int, val float64, agg float64) float64 { - return math.Max(agg, val) -} - -// MinAbsFunc is an StatFunc that computes a min aggregate. -// use math.MaxFloat64 for initial agg value. -func MinAbsFunc(idx int, val float64, agg float64) float64 { - return math.Min(agg, math.Abs(val)) -} - -// MaxAbsFunc is an StatFunc that computes a max aggregate. -// use -math.MaxFloat64 for initial agg value. -func MaxAbsFunc(idx int, val float64, agg float64) float64 { - return math.Max(agg, math.Abs(val)) -} - -// L1NormFunc is an StatFunc that computes the L1 norm: sum of absolute values -// use 0 as initial value. -func L1NormFunc(idx int, val float64, agg float64) float64 { - return agg + math.Abs(val) -} - -// Note: SumSq is not numerically stable for large N in simple func form. diff --git a/tensor/stats/stats/if.go b/tensor/stats/stats/if.go deleted file mode 100644 index fc29404cf8..0000000000 --- a/tensor/stats/stats/if.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package stats - -import ( - "cogentcore.org/core/base/errors" - "cogentcore.org/core/tensor/table" -) - -// IfFunc is used for the *If aggregators -- counted if it returns true -type IfFunc func(idx int, val float64) bool - -/////////////////////////////////////////////////// -// CountIf - -// CountIfIndex returns the count of true return values for given IfFunc on -// non-NaN elements in given IndexView indexed view of an -// table.Table, for given column index. -// Return value(s) is size of column cell: 1 for scalar 1D columns -// and N for higher-dimensional columns. -func CountIfIndex(ix *table.IndexView, colIndex int, iffun IfFunc) []float64 { - return StatIndexFunc(ix, colIndex, 0, func(idx int, val float64, agg float64) float64 { - if iffun(idx, val) { - return agg + 1 - } - return agg - }) -} - -// CountIfColumn returns the count of true return values for given IfFunc on -// non-NaN elements in given IndexView indexed view of an -// table.Table, for given column name. -// If name not found, nil is returned. 
-// Return value(s) is size of column cell: 1 for scalar 1D columns -// and N for higher-dimensional columns. -func CountIfColumn(ix *table.IndexView, column string, iffun IfFunc) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return CountIfIndex(ix, colIndex, iffun) -} - -/////////////////////////////////////////////////// -// PropIf - -// PropIfIndex returns the proportion (0-1) of true return values for given IfFunc on -// non-Null, non-NaN elements in given IndexView indexed view of an -// table.Table, for given column index. -// Return value(s) is size of column cell: 1 for scalar 1D columns -// and N for higher-dimensional columns. -func PropIfIndex(ix *table.IndexView, colIndex int, iffun IfFunc) []float64 { - cnt := CountIndex(ix, colIndex) - if cnt == nil { - return nil - } - pif := CountIfIndex(ix, colIndex, iffun) - for i := range pif { - if cnt[i] > 0 { - pif[i] /= cnt[i] - } - } - return pif -} - -// PropIfColumn returns the proportion (0-1) of true return values for given IfFunc on -// non-NaN elements in given IndexView indexed view of an -// table.Table, for given column name. -// If name not found, nil is returned -- use Try version for error message. -// Return value(s) is size of column cell: 1 for scalar 1D columns -// and N for higher-dimensional columns. -func PropIfColumn(ix *table.IndexView, column string, iffun IfFunc) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return PropIfIndex(ix, colIndex, iffun) -} - -/////////////////////////////////////////////////// -// PctIf - -// PctIfIndex returns the percentage (0-100) of true return values for given IfFunc on -// non-Null, non-NaN elements in given IndexView indexed view of an -// table.Table, for given column index. -// Return value(s) is size of column cell: 1 for scalar 1D columns -// and N for higher-dimensional columns. -func PctIfIndex(ix *table.IndexView, colIndex int, iffun IfFunc) []float64 { - cnt := CountIndex(ix, colIndex) - if cnt == nil { - return nil - } - pif := CountIfIndex(ix, colIndex, iffun) - for i := range pif { - if cnt[i] > 0 { - pif[i] = 100.0 * (pif[i] / cnt[i]) - } - } - return pif -} - -// PctIfColumn returns the percentage (0-100) of true return values for given IfFunc on -// non-Null, non-NaN elements in given IndexView indexed view of an -// table.Table, for given column name. -// If name not found, nil is returned -- use Try version for error message. -// Return value(s) is size of column cell: 1 for scalar 1D columns -// and N for higher-dimensional columns. -func PctIfColumn(ix *table.IndexView, column string, iffun IfFunc) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return PctIfIndex(ix, colIndex, iffun) -} diff --git a/tensor/stats/stats/indexview.go b/tensor/stats/stats/indexview.go deleted file mode 100644 index e849f4cfba..0000000000 --- a/tensor/stats/stats/indexview.go +++ /dev/null @@ -1,761 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
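A minimal sketch of the *If helpers defined above, using an illustrative column and predicate; values 0..9 with a "val >= 5" test give a count of 5, a proportion of 0.5, and a percentage of 50.

package main

import (
	"fmt"

	"cogentcore.org/core/tensor/stats/stats"
	"cogentcore.org/core/tensor/table"
)

func main() {
	dt := table.NewTable().SetNumRows(10)
	dt.AddFloat32Column("Value")
	for i := 0; i < dt.Rows; i++ {
		dt.SetFloat("Value", i, float64(i))
	}
	ix := table.NewIndexView(dt)

	big := func(idx int, val float64) bool { return val >= 5 }

	fmt.Println(stats.CountIfColumn(ix, "Value", big)) // [5]
	fmt.Println(stats.PropIfColumn(ix, "Value", big))  // [0.5]
	fmt.Println(stats.PctIfColumn(ix, "Value", big))   // [50]
}
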
- -package stats - -import ( - "math" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/tensor/table" -) - -// Every IndexView Stats method in this file follows one of these signatures: - -// IndexViewFuncIndex is a stats function operating on IndexView, taking a column index arg -type IndexViewFuncIndex func(ix *table.IndexView, colIndex int) []float64 - -// IndexViewFuncColumn is a stats function operating on IndexView, taking a column name arg -type IndexViewFuncColumn func(ix *table.IndexView, column string) []float64 - -// StatIndex returns IndexView statistic according to given Stats type applied -// to all non-NaN elements in given IndexView indexed view of -// an table.Table, for given column index. -// Return value(s) is size of column cell: 1 for scalar 1D columns -// and N for higher-dimensional columns. -func StatIndex(ix *table.IndexView, colIndex int, stat Stats) []float64 { - switch stat { - case Count: - return CountIndex(ix, colIndex) - case Sum: - return SumIndex(ix, colIndex) - case Prod: - return ProdIndex(ix, colIndex) - case Min: - return MinIndex(ix, colIndex) - case Max: - return MaxIndex(ix, colIndex) - case MinAbs: - return MinAbsIndex(ix, colIndex) - case MaxAbs: - return MaxAbsIndex(ix, colIndex) - case Mean: - return MeanIndex(ix, colIndex) - case Var: - return VarIndex(ix, colIndex) - case Std: - return StdIndex(ix, colIndex) - case Sem: - return SemIndex(ix, colIndex) - case L1Norm: - return L1NormIndex(ix, colIndex) - case SumSq: - return SumSqIndex(ix, colIndex) - case L2Norm: - return L2NormIndex(ix, colIndex) - case VarPop: - return VarPopIndex(ix, colIndex) - case StdPop: - return StdPopIndex(ix, colIndex) - case SemPop: - return SemPopIndex(ix, colIndex) - case Median: - return MedianIndex(ix, colIndex) - case Q1: - return Q1Index(ix, colIndex) - case Q3: - return Q3Index(ix, colIndex) - } - return nil -} - -// StatColumn returns IndexView statistic according to given Stats type applied -// to all non-NaN elements in given IndexView indexed view of -// an table.Table, for given column name. -// If name not found, returns error message. -// Return value(s) is size of column cell: 1 for scalar 1D columns -// and N for higher-dimensional columns. -func StatColumn(ix *table.IndexView, column string, stat Stats) ([]float64, error) { - colIndex, err := ix.Table.ColumnIndex(column) - if err != nil { - return nil, err - } - rv := StatIndex(ix, colIndex, stat) - return rv, nil -} - -// StatIndexFunc applies given StatFunc function to each element in the given column, -// using float64 conversions of the values. ini is the initial value for the agg variable. -// Operates independently over each cell on n-dimensional columns and returns the result as a slice -// of values per cell. -func StatIndexFunc(ix *table.IndexView, colIndex int, ini float64, fun StatFunc) []float64 { - cl := ix.Table.Columns[colIndex] - _, csz := cl.RowCellSize() - - ag := make([]float64, csz) - for i := range ag { - ag[i] = ini - } - if csz == 1 { - for _, srw := range ix.Indexes { - val := cl.Float1D(srw) - if !math.IsNaN(val) { - ag[0] = fun(srw, val, ag[0]) - } - } - } else { - for _, srw := range ix.Indexes { - si := srw * csz - for j := range ag { - val := cl.Float1D(si + j) - if !math.IsNaN(val) { - ag[j] = fun(si+j, val, ag[j]) - } - } - } - } - return ag -} - -/////////////////////////////////////////////////// -// Count - -// CountIndex returns the count of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. 
-// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func CountIndex(ix *table.IndexView, colIndex int) []float64 { - return StatIndexFunc(ix, colIndex, 0, CountFunc) -} - -// CountColumn returns the count of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func CountColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return CountIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// Sum - -// SumIndex returns the sum of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func SumIndex(ix *table.IndexView, colIndex int) []float64 { - return StatIndexFunc(ix, colIndex, 0, SumFunc) -} - -// SumColumn returns the sum of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func SumColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return SumIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// Prod - -// ProdIndex returns the product of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func ProdIndex(ix *table.IndexView, colIndex int) []float64 { - return StatIndexFunc(ix, colIndex, 1, ProdFunc) -} - -// ProdColumn returns the product of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func ProdColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return ProdIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// Min - -// MinIndex returns the minimum of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func MinIndex(ix *table.IndexView, colIndex int) []float64 { - return StatIndexFunc(ix, colIndex, math.MaxFloat64, MinFunc) -} - -// MinColumn returns the minimum of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. 
-func MinColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return MinIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// Max - -// MaxIndex returns the maximum of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func MaxIndex(ix *table.IndexView, colIndex int) []float64 { - return StatIndexFunc(ix, colIndex, -math.MaxFloat64, MaxFunc) -} - -// MaxColumn returns the maximum of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func MaxColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return MaxIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// MinAbs - -// MinAbsIndex returns the minimum of abs of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func MinAbsIndex(ix *table.IndexView, colIndex int) []float64 { - return StatIndexFunc(ix, colIndex, math.MaxFloat64, MinAbsFunc) -} - -// MinAbsColumn returns the minimum of abs of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func MinAbsColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return MinAbsIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// MaxAbs - -// MaxAbsIndex returns the maximum of abs of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func MaxAbsIndex(ix *table.IndexView, colIndex int) []float64 { - return StatIndexFunc(ix, colIndex, -math.MaxFloat64, MaxAbsFunc) -} - -// MaxAbsColumn returns the maximum of abs of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func MaxAbsColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return MaxAbsIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// Mean - -// MeanIndex returns the mean of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. 
-func MeanIndex(ix *table.IndexView, colIndex int) []float64 { - cnt := CountIndex(ix, colIndex) - if cnt == nil { - return nil - } - mean := SumIndex(ix, colIndex) - for i := range mean { - if cnt[i] > 0 { - mean[i] /= cnt[i] - } - } - return mean -} - -// MeanColumn returns the mean of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func MeanColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return MeanIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// Var - -// VarIndex returns the sample variance of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Sample variance is normalized by 1/(n-1) -- see VarPop version for 1/n normalization. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func VarIndex(ix *table.IndexView, colIndex int) []float64 { - cnt := CountIndex(ix, colIndex) - if cnt == nil { - return nil - } - mean := SumIndex(ix, colIndex) - for i := range mean { - if cnt[i] > 0 { - mean[i] /= cnt[i] - } - } - col := ix.Table.Columns[colIndex] - _, csz := col.RowCellSize() - vr := StatIndexFunc(ix, colIndex, 0, func(idx int, val float64, agg float64) float64 { - cidx := idx % csz - dv := val - mean[cidx] - return agg + dv*dv - }) - for i := range vr { - if cnt[i] > 1 { - vr[i] /= (cnt[i] - 1) - } - } - return vr -} - -// VarColumn returns the sample variance of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// Sample variance is normalized by 1/(n-1) -- see VarPop version for 1/n normalization. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func VarColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return VarIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// Std - -// StdIndex returns the sample std deviation of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Sample std deviation is normalized by 1/(n-1) -- see StdPop version for 1/n normalization. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func StdIndex(ix *table.IndexView, colIndex int) []float64 { - std := VarIndex(ix, colIndex) - for i := range std { - std[i] = math.Sqrt(std[i]) - } - return std -} - -// StdColumn returns the sample std deviation of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// Sample std deviation is normalized by 1/(n-1) -- see StdPop version for 1/n normalization. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. 
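Editorial worked check of the sample statistics above, using the five-value column {1, 2, 3, 4, 5} from the deleted package test below: the mean is 3 and the squared deviations sum to 4 + 1 + 0 + 1 + 4 = 10, so Var = 10/(5-1) = 2.5, Std = sqrt(2.5) (about 1.58), and Sem = sqrt(2.5)/sqrt(5) (about 0.71). The population variants divide by n instead of n-1, giving VarPop = 10/5 = 2, StdPop = sqrt(2), and SemPop = sqrt(2)/sqrt(5), which are exactly the expected values in that test.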
-func StdColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return StdIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// Sem - -// SemIndex returns the sample standard error of the mean of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Sample sem is normalized by 1/(n-1) -- see SemPop version for 1/n normalization. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func SemIndex(ix *table.IndexView, colIndex int) []float64 { - cnt := CountIndex(ix, colIndex) - if cnt == nil { - return nil - } - sem := StdIndex(ix, colIndex) - for i := range sem { - if cnt[i] > 0 { - sem[i] /= math.Sqrt(cnt[i]) - } - } - return sem -} - -// SemColumn returns the sample standard error of the mean of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// Sample sem is normalized by 1/(n-1) -- see SemPop version for 1/n normalization. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func SemColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return SemIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// L1Norm - -// L1NormIndex returns the L1 norm (sum abs values) of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func L1NormIndex(ix *table.IndexView, colIndex int) []float64 { - return StatIndexFunc(ix, colIndex, 0, L1NormFunc) -} - -// L1NormColumn returns the L1 norm (sum abs values) of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func L1NormColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return L1NormIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// SumSq - -// SumSqIndex returns the sum-of-squares of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. 
-func SumSqIndex(ix *table.IndexView, colIndex int) []float64 { - cl := ix.Table.Columns[colIndex] - _, csz := cl.RowCellSize() - - scale := make([]float64, csz) - ss := make([]float64, csz) - for i := range ss { - ss[i] = 1 - } - n := len(ix.Indexes) - if csz == 1 { - if n < 2 { - if n == 1 { - ss[0] = math.Abs(cl.Float1D(ix.Indexes[0])) - return ss - } - return scale // all 0s - } - for _, srw := range ix.Indexes { - v := cl.Float1D(srw) - absxi := math.Abs(v) - if scale[0] < absxi { - ss[0] = 1 + ss[0]*(scale[0]/absxi)*(scale[0]/absxi) - scale[0] = absxi - } else { - ss[0] = ss[0] + (absxi/scale[0])*(absxi/scale[0]) - } - } - if math.IsInf(scale[0], 1) { - ss[0] = math.Inf(1) - } else { - ss[0] = scale[0] * scale[0] * ss[0] - } - } else { - if n < 2 { - if n == 1 { - si := csz * ix.Indexes[0] - for j := range csz { - ss[j] = math.Abs(cl.Float1D(si + j)) - } - return ss - } - return scale // all 0s - } - for _, srw := range ix.Indexes { - si := srw * csz - for j := range ss { - v := cl.Float1D(si + j) - absxi := math.Abs(v) - if scale[j] < absxi { - ss[j] = 1 + ss[j]*(scale[j]/absxi)*(scale[j]/absxi) - scale[j] = absxi - } else { - ss[j] = ss[j] + (absxi/scale[j])*(absxi/scale[j]) - } - } - } - for j := range ss { - if math.IsInf(scale[j], 1) { - ss[j] = math.Inf(1) - } else { - ss[j] = scale[j] * scale[j] * ss[j] - } - } - } - return ss -} - -// SumSqColumn returns the sum-of-squares of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func SumSqColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return SumSqIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// L2Norm - -// L2NormIndex returns the L2 norm (square root of sum-of-squares) of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func L2NormIndex(ix *table.IndexView, colIndex int) []float64 { - ss := SumSqIndex(ix, colIndex) - for i := range ss { - ss[i] = math.Sqrt(ss[i]) - } - return ss -} - -// L2NormColumn returns the L2 norm (square root of sum-of-squares) of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func L2NormColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return L2NormIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// VarPop - -// VarPopIndex returns the population variance of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// population variance is normalized by 1/n -- see Var version for 1/(n-1) sample normalization. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. 
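Editorial note on the implementation above (not part of the diff): SumSqIndex, and L2NormIndex built on it, accumulate with the overflow-safe scaling familiar from BLAS-style nrm2 routines, tracking the largest absolute value seen (scale) and a normalized sum (ss) so the final result scale*scale*ss avoids premature overflow or underflow. A standalone sketch of the same recurrence for a plain slice (illustrative only):

```Go
package main

import (
	"fmt"
	"math"
)

// sumSqScaled computes the sum of squares of xs using the same
// scale/ss recurrence as SumSqIndex, skipping zeros and NaNs.
func sumSqScaled(xs []float64) float64 {
	scale, ss := 0.0, 1.0
	for _, v := range xs {
		if v == 0 || math.IsNaN(v) {
			continue
		}
		ax := math.Abs(v)
		if scale < ax {
			// rescale the accumulated sum to the new, larger reference value
			ss = 1 + ss*(scale/ax)*(scale/ax)
			scale = ax
		} else {
			ss += (ax / scale) * (ax / scale)
		}
	}
	return scale * scale * ss
}

func main() {
	fmt.Println(sumSqScaled([]float64{1, 2, 3, 4, 5})) // 55, matching the SumSq value in the deleted tests
}
```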
-func VarPopIndex(ix *table.IndexView, colIndex int) []float64 { - cnt := CountIndex(ix, colIndex) - if cnt == nil { - return nil - } - mean := SumIndex(ix, colIndex) - for i := range mean { - if cnt[i] > 0 { - mean[i] /= cnt[i] - } - } - col := ix.Table.Columns[colIndex] - _, csz := col.RowCellSize() - vr := StatIndexFunc(ix, colIndex, 0, func(idx int, val float64, agg float64) float64 { - cidx := idx % csz - dv := val - mean[cidx] - return agg + dv*dv - }) - for i := range vr { - if cnt[i] > 0 { - vr[i] /= cnt[i] - } - } - return vr -} - -// VarPopColumn returns the population variance of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// population variance is normalized by 1/n -- see Var version for 1/(n-1) sample normalization. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func VarPopColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return VarPopIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// StdPop - -// StdPopIndex returns the population std deviation of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// population std dev is normalized by 1/n -- see Var version for 1/(n-1) sample normalization. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func StdPopIndex(ix *table.IndexView, colIndex int) []float64 { - std := VarPopIndex(ix, colIndex) - for i := range std { - std[i] = math.Sqrt(std[i]) - } - return std -} - -// StdPopColumn returns the population std deviation of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// population std dev is normalized by 1/n -- see Var version for 1/(n-1) sample normalization. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func StdPopColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return StdPopIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// SemPop - -// SemPopIndex returns the population standard error of the mean of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// population sem is normalized by 1/n -- see Var version for 1/(n-1) sample normalization. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func SemPopIndex(ix *table.IndexView, colIndex int) []float64 { - cnt := CountIndex(ix, colIndex) - if cnt == nil { - return nil - } - sem := StdPopIndex(ix, colIndex) - for i := range sem { - if cnt[i] > 0 { - sem[i] /= math.Sqrt(cnt[i]) - } - } - return sem -} - -// SemPopColumn returns the standard error of the mean of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// population sem is normalized by 1/n -- see Var version for 1/(n-1) sample normalization. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. 
-func SemPopColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return SemPopIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// Median - -// MedianIndex returns the median of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func MedianIndex(ix *table.IndexView, colIndex int) []float64 { - return QuantilesIndex(ix, colIndex, []float64{.5}) -} - -// MedianColumn returns the median of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func MedianColumn(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return MedianIndex(ix, colIndex) -} - -/////////////////////////////////////////////////// -// Q1 - -// Q1Index returns the first quartile of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func Q1Index(ix *table.IndexView, colIndex int) []float64 { - return QuantilesIndex(ix, colIndex, []float64{.25}) -} - -// Q1Column returns the first quartile of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func Q1Column(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return Q1Index(ix, colIndex) -} - -/////////////////////////////////////////////////// -// Q3 - -// Q3Index returns the third quartile of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func Q3Index(ix *table.IndexView, colIndex int) []float64 { - return QuantilesIndex(ix, colIndex, []float64{.75}) -} - -// Q3Column returns the third quartile of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned. -// Return value is size of each column cell -- 1 for scalar 1D columns -// and N for higher-dimensional columns. -func Q3Column(ix *table.IndexView, column string) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return Q3Index(ix, colIndex) -} diff --git a/tensor/stats/stats/indexview_test.go b/tensor/stats/stats/indexview_test.go deleted file mode 100644 index 31c541bdd5..0000000000 --- a/tensor/stats/stats/indexview_test.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package stats - -import ( - "math" - "testing" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/base/tolassert" - "cogentcore.org/core/tensor/table" - - "github.com/stretchr/testify/assert" -) - -func TestIndexView(t *testing.T) { - dt := table.NewTable().SetNumRows(5) - dt.AddFloat64Column("data") - dt.SetFloat("data", 0, 1) - dt.SetFloat("data", 1, 2) - dt.SetFloat("data", 2, 3) - dt.SetFloat("data", 3, 4) - dt.SetFloat("data", 4, 5) - - ix := table.NewIndexView(dt) - - results := []float64{5, 15, 120, 1, 5, 1, 5, 3, 2.5, math.Sqrt(2.5), math.Sqrt(2.5) / math.Sqrt(5), - 15, 55, math.Sqrt(55), 2, math.Sqrt(2), math.Sqrt(2) / math.Sqrt(5), 3, 2, 4} - - assert.Equal(t, results[Count:Count+1], CountColumn(ix, "data")) - assert.Equal(t, results[Sum:Sum+1], SumColumn(ix, "data")) - assert.Equal(t, results[Prod:Prod+1], ProdColumn(ix, "data")) - assert.Equal(t, results[Min:Min+1], MinColumn(ix, "data")) - assert.Equal(t, results[Max:Max+1], MaxColumn(ix, "data")) - assert.Equal(t, results[MinAbs:MinAbs+1], MinAbsColumn(ix, "data")) - assert.Equal(t, results[MaxAbs:MaxAbs+1], MaxAbsColumn(ix, "data")) - assert.Equal(t, results[Mean:Mean+1], MeanColumn(ix, "data")) - assert.Equal(t, results[Var:Var+1], VarColumn(ix, "data")) - assert.Equal(t, results[Std:Std+1], StdColumn(ix, "data")) - assert.Equal(t, results[Sem:Sem+1], SemColumn(ix, "data")) - assert.Equal(t, results[L1Norm:L1Norm+1], L1NormColumn(ix, "data")) - tolassert.EqualTol(t, results[SumSq], SumSqColumn(ix, "data")[0], 1.0e-8) - tolassert.EqualTol(t, results[L2Norm], L2NormColumn(ix, "data")[0], 1.0e-8) - assert.Equal(t, results[VarPop:VarPop+1], VarPopColumn(ix, "data")) - assert.Equal(t, results[StdPop:StdPop+1], StdPopColumn(ix, "data")) - assert.Equal(t, results[SemPop:SemPop+1], SemPopColumn(ix, "data")) - assert.Equal(t, results[Median:Median+1], MedianColumn(ix, "data")) - assert.Equal(t, results[Q1:Q1+1], Q1Column(ix, "data")) - assert.Equal(t, results[Q3:Q3+1], Q3Column(ix, "data")) - - for _, stat := range StatsValues() { - tolassert.EqualTol(t, results[stat], errors.Log1(StatColumn(ix, "data", stat))[0], 1.0e-8) - } - - desc := DescAll(ix) - assert.Equal(t, len(DescStats), desc.Rows) - assert.Equal(t, 2, desc.NumColumns()) - - for ri, stat := range DescStats { - dv := desc.Float("data", ri) - // fmt.Println(ri, ag.String(), dv, results[ag]) - assert.Equal(t, results[stat], dv) - } - - desc, err := DescColumn(ix, "data") - if err != nil { - t.Error(err) - } - assert.Equal(t, len(DescStats), desc.Rows) - assert.Equal(t, 2, desc.NumColumns()) - for ri, stat := range DescStats { - dv := desc.Float("data", ri) - // fmt.Println(ri, ag.String(), dv, results[ag]) - assert.Equal(t, results[stat], dv) - } - - pcts := PctIfColumn(ix, "data", func(idx int, val float64) bool { - return val > 2 - }) - assert.Equal(t, []float64{60}, pcts) - - props := PropIfColumn(ix, "data", func(idx int, val float64) bool { - return val > 2 - }) - assert.Equal(t, []float64{0.6}, props) -} diff --git a/tensor/stats/stats/quantiles.go b/tensor/stats/stats/quantiles.go deleted file mode 100644 index eb191691a4..0000000000 --- a/tensor/stats/stats/quantiles.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package stats - -import ( - "math" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/tensor/table" -) - -// QuantilesIndex returns the given quantile(s) of non-NaN elements in given -// IndexView indexed view of an table.Table, for given column index. -// Column must be a 1d Column -- returns nil for n-dimensional columns. -// qs are 0-1 values, 0 = min, 1 = max, .5 = median, etc. Uses linear interpolation. -// Because this requires a sort, it is more efficient to get as many quantiles -// as needed in one pass. -func QuantilesIndex(ix *table.IndexView, colIndex int, qs []float64) []float64 { - nq := len(qs) - if nq == 0 { - return nil - } - col := ix.Table.Columns[colIndex] - if col.NumDims() > 1 { // only valid for 1D - return nil - } - rvs := make([]float64, nq) - six := ix.Clone() // leave original indexes intact - six.Filter(func(et *table.Table, row int) bool { // get rid of NaNs in this column - if math.IsNaN(col.Float1D(row)) { - return false - } - return true - }) - six.SortColumn(colIndex, true) - sz := len(six.Indexes) - 1 // length of our own index list - fsz := float64(sz) - for i, q := range qs { - val := 0.0 - qi := q * fsz - lwi := math.Floor(qi) - lwii := int(lwi) - if lwii >= sz { - val = col.Float1D(six.Indexes[sz]) - } else if lwii < 0 { - val = col.Float1D(six.Indexes[0]) - } else { - phi := qi - lwi - lwv := col.Float1D(six.Indexes[lwii]) - hiv := col.Float1D(six.Indexes[lwii+1]) - val = (1-phi)*lwv + phi*hiv - } - rvs[i] = val - } - return rvs -} - -// Quantiles returns the given quantile(s) of non-Null, non-NaN elements in given -// IndexView indexed view of an table.Table, for given column name. -// If name not found, nil is returned -- use Try version for error message. -// Column must be a 1d Column -- returns nil for n-dimensional columns. -// qs are 0-1 values, 0 = min, 1 = max, .5 = median, etc. Uses linear interpolation. -// Because this requires a sort, it is more efficient to get as many quantiles -// as needed in one pass. -func Quantiles(ix *table.IndexView, column string, qs []float64) []float64 { - colIndex := errors.Log1(ix.Table.ColumnIndex(column)) - if colIndex == -1 { - return nil - } - return QuantilesIndex(ix, colIndex, qs) -} diff --git a/tensor/stats/stats/stats.go b/tensor/stats/stats/stats.go deleted file mode 100644 index a77a2a15a8..0000000000 --- a/tensor/stats/stats/stats.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
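Editorial worked example of the linear interpolation above, using the sorted five-value column {1, 2, 3, 4, 5} from the deleted package test (so sz = 4): for q = 0.5, qi = 0.5*4 = 2 exactly, and the median is the value at sorted index 2, namely 3; q = 0.25 and q = 0.75 land exactly on indexes 1 and 3, giving Q1 = 2 and Q3 = 4. A non-aligned quantile such as q = 0.1 gives qi = 0.4, which interpolates 0.6*1 + 0.4*2 = 1.4 between sorted indexes 0 and 1.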
- -package stats - -//go:generate core generate - -// Stats is a list of different standard aggregation functions, which can be used -// to choose an aggregation function -type Stats int32 //enums:enum - -const ( - // count of number of elements - Count Stats = iota - - // sum of elements - Sum - - // product of elements - Prod - - // minimum value - Min - - // max maximum value - Max - - // minimum absolute value - MinAbs - - // maximum absolute value - MaxAbs - - // mean mean value - Mean - - // sample variance (squared diffs from mean, divided by n-1) - Var - - // sample standard deviation (sqrt of Var) - Std - - // sample standard error of the mean (Std divided by sqrt(n)) - Sem - - // L1 Norm: sum of absolute values - L1Norm - - // sum of squared values - SumSq - - // L2 Norm: square-root of sum-of-squares - L2Norm - - // population variance (squared diffs from mean, divided by n) - VarPop - - // population standard deviation (sqrt of VarPop) - StdPop - - // population standard error of the mean (StdPop divided by sqrt(n)) - SemPop - - // middle value in sorted ordering - Median - - // Q1 first quartile = 25%ile value = .25 quantile value - Q1 - - // Q3 third quartile = 75%ile value = .75 quantile value - Q3 -) diff --git a/tensor/stats/stats/table.go b/tensor/stats/stats/table.go deleted file mode 100644 index 018fb75a9a..0000000000 --- a/tensor/stats/stats/table.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package stats - -import ( - "reflect" - - "cogentcore.org/core/tensor/table" -) - -// MeanTables returns an table.Table with the mean values across all float -// columns of the input tables, which must have the same columns but not -// necessarily the same number of rows. -func MeanTables(dts []*table.Table) *table.Table { - nt := len(dts) - if nt == 0 { - return nil - } - maxRows := 0 - var maxdt *table.Table - for _, dt := range dts { - if dt.Rows > maxRows { - maxRows = dt.Rows - maxdt = dt - } - } - if maxRows == 0 { - return nil - } - ot := maxdt.Clone() - - // N samples per row - rns := make([]int, maxRows) - for _, dt := range dts { - dnr := dt.Rows - mx := min(dnr, maxRows) - for ri := 0; ri < mx; ri++ { - rns[ri]++ - } - } - for ci, cl := range ot.Columns { - if cl.DataType() != reflect.Float32 && cl.DataType() != reflect.Float64 { - continue - } - _, cells := cl.RowCellSize() - for di, dt := range dts { - if di == 0 { - continue - } - dc := dt.Columns[ci] - dnr := dt.Rows - mx := min(dnr, maxRows) - for ri := 0; ri < mx; ri++ { - si := ri * cells - for j := 0; j < cells; j++ { - ci := si + j - cv := cl.Float1D(ci) - cv += dc.Float1D(ci) - cl.SetFloat1D(ci, cv) - } - } - } - for ri := 0; ri < maxRows; ri++ { - si := ri * cells - for j := 0; j < cells; j++ { - ci := si + j - cv := cl.Float1D(ci) - if rns[ri] > 0 { - cv /= float64(rns[ri]) - cl.SetFloat1D(ci, cv) - } - } - } - } - return ot -} diff --git a/tensor/stats/stats/tensor.go b/tensor/stats/stats/tensor.go deleted file mode 100644 index bc33041d8e..0000000000 --- a/tensor/stats/stats/tensor.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
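Editorial aside (not part of the diff): MeanTables above averages the float columns of several same-schema tables element-wise, weighting each row by how many input tables actually have that row. A minimal usage sketch, assuming the pre-removal import paths; makeRun is a hypothetical helper for the sketch:

```Go
package main

import (
	"fmt"

	"cogentcore.org/core/tensor/stats/stats"
	"cogentcore.org/core/tensor/table"
)

// makeRun builds a tiny one-column table of per-row values (hypothetical helper).
func makeRun(vals ...float64) *table.Table {
	dt := table.NewTable().SetNumRows(len(vals))
	dt.AddFloat64Column("Err")
	for i, v := range vals {
		dt.SetFloat("Err", i, v)
	}
	return dt
}

func main() {
	a := makeRun(1, 2, 3)
	b := makeRun(3, 4, 5)
	avg := stats.MeanTables([]*table.Table{a, b})
	// element-wise means of the two runs: 2 3 4
	fmt.Println(avg.Float("Err", 0), avg.Float("Err", 1), avg.Float("Err", 2))
}
```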
- -package stats - -import ( - "math" - - "cogentcore.org/core/tensor" -) - -// StatTensor returns Tensor statistic according to given Stats type applied -// to all non-NaN elements in given Tensor -func StatTensor(tsr tensor.Tensor, stat Stats) float64 { - switch stat { - case Count: - return CountTensor(tsr) - case Sum: - return SumTensor(tsr) - case Prod: - return ProdTensor(tsr) - case Min: - return MinTensor(tsr) - case Max: - return MaxTensor(tsr) - case MinAbs: - return MinAbsTensor(tsr) - case MaxAbs: - return MaxAbsTensor(tsr) - case Mean: - return MeanTensor(tsr) - case Var: - return VarTensor(tsr) - case Std: - return StdTensor(tsr) - case Sem: - return SemTensor(tsr) - case L1Norm: - return L1NormTensor(tsr) - case SumSq: - return SumSqTensor(tsr) - case L2Norm: - return L2NormTensor(tsr) - case VarPop: - return VarPopTensor(tsr) - case StdPop: - return StdPopTensor(tsr) - case SemPop: - return SemPopTensor(tsr) - // case Median: - // return MedianTensor(tsr) - // case Q1: - // return Q1Tensor(tsr) - // case Q3: - // return Q3Tensor(tsr) - } - return 0 -} - -// TensorStat applies given StatFunc function to each element in the tensor -// (automatically skips NaN elements), using float64 conversions of the values. -// ini is the initial value for the agg variable. returns final aggregate value -func TensorStat(tsr tensor.Tensor, ini float64, fun StatFunc) float64 { - ln := tsr.Len() - agg := ini - for j := 0; j < ln; j++ { - val := tsr.Float1D(j) - if !math.IsNaN(val) { - agg = fun(j, val, agg) - } - } - return agg -} - -// CountTensor returns the count of non-NaN elements in given Tensor. -func CountTensor(tsr tensor.Tensor) float64 { - return TensorStat(tsr, 0, CountFunc) -} - -// SumTensor returns the sum of non-NaN elements in given Tensor. -func SumTensor(tsr tensor.Tensor) float64 { - return TensorStat(tsr, 0, SumFunc) -} - -// ProdTensor returns the product of non-NaN elements in given Tensor. -func ProdTensor(tsr tensor.Tensor) float64 { - return TensorStat(tsr, 1, ProdFunc) -} - -// MinTensor returns the minimum of non-NaN elements in given Tensor. -func MinTensor(tsr tensor.Tensor) float64 { - return TensorStat(tsr, math.MaxFloat64, MinFunc) -} - -// MaxTensor returns the maximum of non-NaN elements in given Tensor. -func MaxTensor(tsr tensor.Tensor) float64 { - return TensorStat(tsr, -math.MaxFloat64, MaxFunc) -} - -// MinAbsTensor returns the minimum of non-NaN elements in given Tensor. -func MinAbsTensor(tsr tensor.Tensor) float64 { - return TensorStat(tsr, math.MaxFloat64, MinAbsFunc) -} - -// MaxAbsTensor returns the maximum of non-NaN elements in given Tensor. -func MaxAbsTensor(tsr tensor.Tensor) float64 { - return TensorStat(tsr, -math.MaxFloat64, MaxAbsFunc) -} - -// MeanTensor returns the mean of non-NaN elements in given Tensor. -func MeanTensor(tsr tensor.Tensor) float64 { - cnt := CountTensor(tsr) - if cnt == 0 { - return 0 - } - return SumTensor(tsr) / cnt -} - -// VarTensor returns the sample variance of non-NaN elements in given Tensor. -func VarTensor(tsr tensor.Tensor) float64 { - cnt := CountTensor(tsr) - if cnt < 2 { - return 0 - } - mean := SumTensor(tsr) / cnt - vr := TensorStat(tsr, 0, func(idx int, val float64, agg float64) float64 { - dv := val - mean - return agg + dv*dv - }) - return vr / (cnt - 1) -} - -// StdTensor returns the sample standard deviation of non-NaN elements in given Tensor. 
-func StdTensor(tsr tensor.Tensor) float64 { - return math.Sqrt(VarTensor(tsr)) -} - -// SemTensor returns the sample standard error of the mean of non-NaN elements in given Tensor. -func SemTensor(tsr tensor.Tensor) float64 { - cnt := CountTensor(tsr) - if cnt < 2 { - return 0 - } - return StdTensor(tsr) / math.Sqrt(cnt) -} - -// L1NormTensor returns the L1 norm: sum of absolute values of non-NaN elements in given Tensor. -func L1NormTensor(tsr tensor.Tensor) float64 { - return TensorStat(tsr, 0, L1NormFunc) -} - -// SumSqTensor returns the sum-of-squares of non-NaN elements in given Tensor. -func SumSqTensor(tsr tensor.Tensor) float64 { - n := tsr.Len() - if n < 2 { - if n == 1 { - return math.Abs(tsr.Float1D(0)) - } - return 0 - } - var ( - scale float64 = 0 - ss float64 = 1 - ) - for j := 0; j < n; j++ { - v := tsr.Float1D(j) - if v == 0 || math.IsNaN(v) { - continue - } - absxi := math.Abs(v) - if scale < absxi { - ss = 1 + ss*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - ss = ss + (absxi/scale)*(absxi/scale) - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * scale * ss -} - -// L2NormTensor returns the L2 norm: square root of sum-of-squared values of non-NaN elements in given Tensor. -func L2NormTensor(tsr tensor.Tensor) float64 { - return math.Sqrt(SumSqTensor(tsr)) -} - -// VarPopTensor returns the population variance of non-NaN elements in given Tensor. -func VarPopTensor(tsr tensor.Tensor) float64 { - cnt := CountTensor(tsr) - if cnt < 2 { - return 0 - } - mean := SumTensor(tsr) / cnt - vr := TensorStat(tsr, 0, func(idx int, val float64, agg float64) float64 { - dv := val - mean - return agg + dv*dv - }) - return vr / cnt -} - -// StdPopTensor returns the population standard deviation of non-NaN elements in given Tensor. -func StdPopTensor(tsr tensor.Tensor) float64 { - return math.Sqrt(VarPopTensor(tsr)) -} - -// SemPopTensor returns the population standard error of the mean of non-NaN elements in given Tensor. -func SemPopTensor(tsr tensor.Tensor) float64 { - cnt := CountTensor(tsr) - if cnt < 2 { - return 0 - } - return StdPopTensor(tsr) / math.Sqrt(cnt) -} diff --git a/tensor/stats/stats/tensor_test.go b/tensor/stats/stats/tensor_test.go deleted file mode 100644 index 663ff43f50..0000000000 --- a/tensor/stats/stats/tensor_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package stats - -import ( - "math" - "testing" - - "cogentcore.org/core/base/tolassert" - "cogentcore.org/core/tensor" - "github.com/stretchr/testify/assert" -) - -func TestTsrAgg(t *testing.T) { - tsr := tensor.New[float64]([]int{5}).(*tensor.Float64) - tsr.Values = []float64{1, 2, 3, 4, 5} - - results := []float64{5, 15, 120, 1, 5, 1, 5, 3, 2.5, math.Sqrt(2.5), math.Sqrt(2.5) / math.Sqrt(5), - 15, 55, math.Sqrt(55), 2, math.Sqrt(2), math.Sqrt(2) / math.Sqrt(5), 3, 2, 4} - - assert.Equal(t, results[Count], CountTensor(tsr)) - assert.Equal(t, results[Sum], SumTensor(tsr)) - assert.Equal(t, results[Prod], ProdTensor(tsr)) - assert.Equal(t, results[Min], MinTensor(tsr)) - assert.Equal(t, results[Max], MaxTensor(tsr)) - assert.Equal(t, results[MinAbs], MinAbsTensor(tsr)) - assert.Equal(t, results[MaxAbs], MaxAbsTensor(tsr)) - assert.Equal(t, results[Mean], MeanTensor(tsr)) - assert.Equal(t, results[Var], VarTensor(tsr)) - assert.Equal(t, results[Std], StdTensor(tsr)) - assert.Equal(t, results[Sem], SemTensor(tsr)) - assert.Equal(t, results[L1Norm], L1NormTensor(tsr)) - tolassert.EqualTol(t, results[SumSq], SumSqTensor(tsr), 1.0e-8) - tolassert.EqualTol(t, results[L2Norm], L2NormTensor(tsr), 1.0e-8) - assert.Equal(t, results[VarPop], VarPopTensor(tsr)) - assert.Equal(t, results[StdPop], StdPopTensor(tsr)) - assert.Equal(t, results[SemPop], SemPopTensor(tsr)) - - for stat := Count; stat <= SemPop; stat++ { - tolassert.EqualTol(t, results[stat], StatTensor(tsr, stat), 1.0e-8) - } -} diff --git a/tensor/string.go b/tensor/string.go deleted file mode 100644 index 4a04bfbe68..0000000000 --- a/tensor/string.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tensor - -import ( - "fmt" - "log" - "math" - "strconv" - "strings" - - "cogentcore.org/core/base/slicesx" - "gonum.org/v1/gonum/mat" -) - -// String is a tensor of string values -type String struct { - Base[string] -} - -// NewString returns a new n-dimensional tensor of string values -// with the given sizes per dimension (shape), and optional dimension names. -func NewString(sizes []int, names ...string) *String { - tsr := &String{} - tsr.SetShape(sizes, names...) - tsr.Values = make([]string, tsr.Len()) - return tsr -} - -// NewStringShape returns a new n-dimensional tensor of string values -// using given shape. 
-func NewStringShape(shape *Shape) *String { - tsr := &String{} - tsr.Shp.CopyShape(shape) - tsr.Values = make([]string, tsr.Len()) - return tsr -} - -// StringToFloat64 converts string value to float64 using strconv, -// returning 0 if any error -func StringToFloat64(str string) float64 { - if fv, err := strconv.ParseFloat(str, 64); err == nil { - return fv - } - return 0 -} - -// Float64ToString converts float64 to string value using strconv, g format -func Float64ToString(val float64) string { - return strconv.FormatFloat(val, 'g', -1, 64) -} - -func (tsr *String) IsString() bool { - return true -} - -func (tsr *String) AddScalar(i []int, val float64) float64 { - j := tsr.Shp.Offset(i) - fv := StringToFloat64(tsr.Values[j]) + val - tsr.Values[j] = Float64ToString(fv) - return fv -} - -func (tsr *String) MulScalar(i []int, val float64) float64 { - j := tsr.Shp.Offset(i) - fv := StringToFloat64(tsr.Values[j]) * val - tsr.Values[j] = Float64ToString(fv) - return fv -} - -func (tsr *String) SetString(i []int, val string) { - j := tsr.Shp.Offset(i) - tsr.Values[j] = val -} - -func (tsr String) SetString1D(off int, val string) { - tsr.Values[off] = val -} - -func (tsr *String) SetStringRowCell(row, cell int, val string) { - _, sz := tsr.Shp.RowCellSize() - tsr.Values[row*sz+cell] = val -} - -// String satisfies the fmt.Stringer interface for string of tensor data -func (tsr *String) String() string { - str := tsr.Label() - sz := len(tsr.Values) - if sz > 1000 { - return str - } - var b strings.Builder - b.WriteString(str) - b.WriteString("\n") - oddRow := true - rows, cols, _, _ := Projection2DShape(&tsr.Shp, oddRow) - for r := 0; r < rows; r++ { - rc, _ := Projection2DCoords(&tsr.Shp, oddRow, r, 0) - b.WriteString(fmt.Sprintf("%v: ", rc)) - for c := 0; c < cols; c++ { - idx := Projection2DIndex(tsr.Shape(), oddRow, r, c) - vl := tsr.Values[idx] - b.WriteString(vl) - } - b.WriteString("\n") - } - return b.String() -} - -func (tsr *String) Float(i []int) float64 { - j := tsr.Shp.Offset(i) - return StringToFloat64(tsr.Values[j]) -} - -func (tsr *String) SetFloat(i []int, val float64) { - j := tsr.Shp.Offset(i) - tsr.Values[j] = Float64ToString(val) -} - -func (tsr *String) Float1D(off int) float64 { - return StringToFloat64(tsr.Values[off]) -} - -func (tsr *String) SetFloat1D(off int, val float64) { - tsr.Values[off] = Float64ToString(val) -} - -func (tsr *String) FloatRowCell(row, cell int) float64 { - _, sz := tsr.Shp.RowCellSize() - return StringToFloat64(tsr.Values[row*sz+cell]) -} - -func (tsr *String) SetFloatRowCell(row, cell int, val float64) { - _, sz := tsr.Shp.RowCellSize() - tsr.Values[row*sz+cell] = Float64ToString(val) -} - -// Floats sets []float64 slice of all elements in the tensor -// (length is ensured to be sufficient). -// This can be used for all of the gonum/floats methods -// for basic math, gonum/stats, etc. -func (tsr *String) Floats(flt *[]float64) { - *flt = slicesx.SetLength(*flt, len(tsr.Values)) - for i, v := range tsr.Values { - (*flt)[i] = StringToFloat64(v) - } -} - -// SetFloats sets tensor values from a []float64 slice (copies values). -func (tsr *String) SetFloats(flt []float64) { - for i, v := range flt { - tsr.Values[i] = Float64ToString(v) - } -} - -// At is the gonum/mat.Matrix interface method for returning 2D matrix element at given -// row, column index. Assumes Row-major ordering and logs an error if NumDims < 2. 
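Editorial aside (not part of the diff): in the String tensor above, numeric access round-trips through strconv (Float64ToString / StringToFloat64), so non-numeric text simply reads back as 0. A minimal sketch, assuming the pre-removal cogentcore.org/core/tensor import path:

```Go
package main

import (
	"fmt"

	"cogentcore.org/core/tensor"
)

func main() {
	ts := tensor.NewString([]int{2, 2})
	ts.SetFloat([]int{0, 0}, 3.14)     // stored as the 'g'-formatted string "3.14"
	ts.SetString([]int{0, 1}, "hello") // non-numeric text
	fmt.Println(ts.Float([]int{0, 0})) // 3.14
	fmt.Println(ts.Float([]int{0, 1})) // 0 (parse failure falls back to 0)
}
```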
-func (tsr *String) At(i, j int) float64 { - nd := tsr.NumDims() - if nd < 2 { - log.Println("tensor Dims gonum Matrix call made on Tensor with dims < 2") - return 0 - } else if nd == 2 { - return tsr.Float([]int{i, j}) - } else { - ix := make([]int, nd) - ix[nd-2] = i - ix[nd-1] = j - return tsr.Float(ix) - } -} - -// T is the gonum/mat.Matrix transpose method. -// It performs an implicit transpose by returning the receiver inside a Transpose. -func (tsr *String) T() mat.Matrix { - return mat.Transpose{tsr} -} - -// Range returns the min, max (and associated indexes, -1 = no values) for the tensor. -// This is needed for display and is thus in the core api in optimized form -// Other math operations can be done using gonum/floats package. -func (tsr *String) Range() (min, max float64, minIndex, maxIndex int) { - minIndex = -1 - maxIndex = -1 - for j, vl := range tsr.Values { - fv := StringToFloat64(vl) - if math.IsNaN(fv) { - continue - } - if fv < min || minIndex < 0 { - min = fv - minIndex = j - } - if fv > max || maxIndex < 0 { - max = fv - maxIndex = j - } - } - return -} - -// SetZeros is simple convenience function initialize all values to 0 -func (tsr *String) SetZeros() { - for j := range tsr.Values { - tsr.Values[j] = "" - } -} - -// Clone clones this tensor, creating a duplicate copy of itself with its -// own separate memory representation of all the values, and returns -// that as a Tensor (which can be converted into the known type as needed). -func (tsr *String) Clone() Tensor { - csr := NewStringShape(&tsr.Shp) - copy(csr.Values, tsr.Values) - return csr -} - -// CopyFrom copies all avail values from other tensor into this tensor, with an -// optimized implementation if the other tensor is of the same type, and -// otherwise it goes through appropriate standard type. -func (tsr *String) CopyFrom(frm Tensor) { - if fsm, ok := frm.(*String); ok { - copy(tsr.Values, fsm.Values) - return - } - sz := min(len(tsr.Values), frm.Len()) - for i := 0; i < sz; i++ { - tsr.Values[i] = Float64ToString(frm.Float1D(i)) - } -} - -// CopyShapeFrom copies just the shape from given source tensor -// calling SetShape with the shape params from source (see for more docs). -func (tsr *String) CopyShapeFrom(frm Tensor) { - tsr.SetShape(frm.Shape().Sizes, frm.Shape().Names...) -} - -// CopyCellsFrom copies given range of values from other tensor into this tensor, -// using flat 1D indexes: to = starting index in this Tensor to start copying into, -// start = starting index on from Tensor to start copying from, and n = number of -// values to copy. Uses an optimized implementation if the other tensor is -// of the same type, and otherwise it goes through appropriate standard type. -func (tsr *String) CopyCellsFrom(frm Tensor, to, start, n int) { - if fsm, ok := frm.(*String); ok { - for i := 0; i < n; i++ { - tsr.Values[to+i] = fsm.Values[start+i] - } - return - } - for i := 0; i < n; i++ { - tsr.Values[to+i] = Float64ToString(frm.Float1D(start + i)) - } -} - -// SubSpace returns a new tensor with innermost subspace at given -// offset(s) in outermost dimension(s) (len(offs) < NumDims). -// The new tensor points to the values of the this tensor (i.e., modifications -// will affect both), as its Values slice is a view onto the original (which -// is why only inner-most contiguous supsaces are supported). -// Use Clone() method to separate the two. 
-func (tsr *String) SubSpace(offs []int) Tensor { - b := tsr.subSpaceImpl(offs) - rt := &String{Base: *b} - return rt -} diff --git a/tensor/table/README.md b/tensor/table/README.md deleted file mode 100644 index e7ed404e77..0000000000 --- a/tensor/table/README.md +++ /dev/null @@ -1,185 +0,0 @@ -# table - -[![Go Reference](https://pkg.go.dev/badge/cogentcore.org/core/table.svg)](https://pkg.go.dev/cogentcore.org/core/table) - -**table** provides a DataTable / DataFrame structure similar to [pandas](https://pandas.pydata.org/) and [xarray](http://xarray.pydata.org/en/stable/) in Python, and [Apache Arrow Table](https://github.com/apache/arrow/tree/master/go/arrow/array/table.go), using [tensor](../tensor) n-dimensional columns aligned by common outermost row dimension. - -See [examples/dataproc](examples/dataproc) for a demo of how to use this system for data analysis, paralleling the example in [Python Data Science](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html) using pandas, to see directly how that translates into this framework. - -As a general convention, it is safest, clearest, and quite fast to access columns by name instead of index (there is a map that caches the column indexes), so the base access method names generally take a column name argument, and those that take a column index have an `Index` suffix. In addition, we use the `Try` suffix for versions that return an error message. It is a bit painful for the writer of these methods but very convenient for the users. - -The following packages are included: - -* [bitslice](bitslice) is a Go slice of bytes `[]byte` that has methods for setting individual bits, as if it was a slice of bools, while being 8x more memory efficient. This is used for encoding null entries in `etensor`, and as a Tensor of bool / bits there as well, and is generally very useful for binary (boolean) data. - -* [etensor](etensor) is a Tensor (n-dimensional array) object. `etensor.Tensor` is an interface that applies to many different type-specific instances, such as `etensor.Float32`. A tensor is just a `etensor.Shape` plus a slice holding the specific data type. Our tensor is based directly on the [Apache Arrow](https://github.com/apache/arrow/tree/master/go) project's tensor, and it fully interoperates with it. Arrow tensors are designed to be read-only, and we needed some extra support to make our `etable.Table` work well, so we had to roll our own. Our tensors also interoperate fully with Gonum's 2D-specific Matrix type for the 2D case. - -* [etable](etable) has the `etable.Table` DataTable / DataFrame object, which is useful for many different data analysis and database functions, and also for holding patterns to present to a neural network, and logs of output from the models, etc. A `etable.Table` is just a slice of `etensor.Tensor` columns, that are all aligned along the outer-most *row* dimension. Index-based indirection, which is essential for efficient Sort, Filter etc, is provided by the `etable.IndexView` type, which is an indexed view into a Table. All data processing operations are defined on the IndexView. - -* [eplot](eplot) provides an interactive 2D plotting GUI in [GoGi](https://cogentcore.org/core/gi) for Table data, using the [gonum plot](https://github.com/gonum/plot) plotting package. You can select which columns to plot and specify various basic plot parameters. 
- -* [tensorcore](tensorcore) provides an interactive tabular, spreadsheet-style GUI using [GoGi](https://cogentcore.org/core/gi) for viewing and editing `etable.Table` and `etable.Tensor` objects. The `tensorcore.TensorGrid` also provides a colored grid display higher-dimensional tensor data. - -* [agg](agg) provides standard aggregation functions (`Sum`, `Mean`, `Var`, `Std` etc) operating over `etable.IndexView` views of Table data. It also defines standard `AggFunc` functions such as `SumFunc` which can be used for `Agg` functions on either a Tensor or IndexView. - -* [tsragg](tsragg) provides the same agg functions as in `agg`, but operating on all the values in a given `Tensor`. Because of the indexed, row-based nature of tensors in a Table, these are not the same as the `agg` functions. - -* [split](split) supports splitting a Table into any number of indexed sub-views and aggregating over those (i.e., pivot tables), grouping, summarizing data, etc. - -* [metric](metric) provides similarity / distance metrics such as `Euclidean`, `Cosine`, or `Correlation` that operate on slices of `[]float64` or `[]float32`. - -* [simat](simat) provides similarity / distance matrix computation methods operating on `etensor.Tensor` or `etable.Table` data. The `SimMat` type holds the resulting matrix and labels for the rows and columns, which has a special `SimMatGrid` view in `etview` for visualizing labeled similarity matricies. - -* [pca](pca) provides principal-components-analysis (PCA) and covariance matrix computation functions. - -* [clust](clust) provides standard agglomerative hierarchical clustering including ability to plot results in an eplot. - -* [minmax](minmax) is home of basic Min / Max range struct, and `norm` has lots of good functions for computing standard norms and normalizing vectors. - -* [utils](utils) has various table-related utility command-line utility tools, including `etcat` which combines multiple table files into one file, including option for averaging column data. - -# Cheat Sheet - -`et` is the etable pointer variable for examples below: - -## Table Access - -Scalar columns: - -```Go -val := et.Float("ColName", row) -``` - -```Go -str := et.StringValue("ColName", row) -``` - -Tensor (higher-dimensional) columns: - -```Go -tsr := et.Tensor("ColName", row) // entire tensor at cell (a row-level SubSpace of column tensor) -``` - -```Go -val := et.TensorFloat1D("ColName", row, cellidx) // idx is 1D index into cell tensor -``` - -## Set Table Value - -```Go -et.SetFloat("ColName", row, val) -``` - -```Go -et.SetString("ColName", row, str) -``` - -Tensor (higher-dimensional) columns: - -```Go -et.SetTensor("ColName", row, tsr) // set entire tensor at cell -``` - -```Go -et.SetTensorFloat1D("ColName", row, cellidx, val) // idx is 1D index into cell tensor -``` - -## Find Value(s) in Column - -Returns all rows where value matches given value, in string form (any number will convert to a string) - -```Go -rows := et.RowsByString("ColName", "value", etable.Contains, etable.IgnoreCase) -``` - -Other options are `etable.Equals` instead of `Contains` to search for an exact full string, and `etable.UseCase` if case should be used instead of ignored. - -## Index Views (Sort, Filter, etc) - -The [IndexView](https://godoc.org/github.com/goki/etable/v2/etable#IndexView) provides a list of row-wise indexes into a table, and Sorting, Filtering and Splitting all operate on this index view without changing the underlying table data, for maximum efficiency and flexibility. 
- -```Go -ix := etable.NewIndexView(et) // new view with all rows -``` - -### Sort - -```Go -ix.SortColumnName("Name", etable.Ascending) // etable.Ascending or etable.Descending -SortedTable := ix.NewTable() // turn an IndexView back into a new Table organized in order of indexes -``` - -or: - -```Go -nmcl := et.ColumnByName("Name") // nmcl is an etensor of the Name column, cached -ix.Sort(func(t *Table, i, j int) bool { - return nmcl.StringValue1D(i) < nmcl.StringValue1D(j) -}) -``` - -### Filter - -```Go -nmcl := et.ColumnByName("Name") // column we're filtering on -ix.Filter(func(t *Table, row int) bool { - // filter return value is for what to *keep* (=true), not exclude - // here we keep any row with a name that contains the string "in" - return strings.Contains(nmcl.StringValue1D(row), "in") -}) -``` - -### Splits ("pivot tables" etc), Aggregation - -Create a table of mean values of "Data" column grouped by unique entries in "Name" column, resulting table will be called "DataMean": - -```Go -byNm := split.GroupBy(ix, []string{"Name"}) // column name(s) to group by -split.Agg(byNm, "Data", agg.AggMean) // -gps := byNm.AggsToTable(etable.AddAggName) // etable.AddAggName or etable.ColNameOnly for naming cols -``` - -Describe (basic stats) all columns in a table: - -```Go -ix := etable.NewIndexView(et) // new view with all rows -desc := agg.DescAll(ix) // summary stats of all columns -// get value at given column name (from original table), row "Mean" -mean := desc.Float("ColNm", desc.RowsByString("Agg", "Mean", etable.Equals, etable.UseCase)[0]) -``` - -# CSV / TSV file format - -Tables can be saved and loaded from CSV (comma separated values) or TSV (tab separated values) files. See the next section for special formatting of header strings in these files to record the type and tensor cell shapes. - -## Type and Tensor Headers - -To capture the type and shape of the columns, we support the following header formatting. We weren't able to find any other widely supported standard (please let us know if there is one that we've missed!) - -Here is the mapping of special header prefix characters to standard types: -```Go -'$': etensor.STRING, -'%': etensor.FLOAT32, -'#': etensor.FLOAT64, -'|': etensor.INT64, -'@': etensor.UINT8, -'^': etensor.BOOl, -``` - -Columns that have tensor cell shapes (not just scalars) are marked as such with the *first* such column having a `` suffix indicating the shape of the *cells* in this column, e.g., `<2:5,4>` indicates a 2D cell Y=5,X=4. Each individual column is then indexed as `[ndims:x,y..]` e.g., the first would be `[2:0,0]`, then `[2:0,1]` etc. - -### Example - -Here's a TSV file for a scalar String column (`Name`), a 2D 1x4 tensor float32 column (`Input`), and a 2D 1x2 float32 `Output` column. - -``` -_H: $Name %Input[2:0,0]<2:1,4> %Input[2:0,1] %Input[2:0,2] %Input[2:0,3] %Output[2:0,0]<2:1,2> %Output[2:0,1] -_D: Event_0 1 0 0 0 1 0 -_D: Event_1 0 1 0 0 1 0 -_D: Event_2 0 0 1 0 0 1 -_D: Event_3 0 0 0 1 0 1 -``` - - - - diff --git a/tensor/table/enumgen.go b/tensor/table/enumgen.go deleted file mode 100644 index c4eda161e5..0000000000 --- a/tensor/table/enumgen.go +++ /dev/null @@ -1,46 +0,0 @@ -// Code generated by "core generate"; DO NOT EDIT. - -package table - -import ( - "cogentcore.org/core/enums" -) - -var _DelimsValues = []Delims{0, 1, 2, 3} - -// DelimsN is the highest valid value for type Delims, plus one. 
-const DelimsN Delims = 4 - -var _DelimsValueMap = map[string]Delims{`Tab`: 0, `Comma`: 1, `Space`: 2, `Detect`: 3} - -var _DelimsDescMap = map[Delims]string{0: `Tab is the tab rune delimiter, for TSV tab separated values`, 1: `Comma is the comma rune delimiter, for CSV comma separated values`, 2: `Space is the space rune delimiter, for SSV space separated value`, 3: `Detect is used during reading a file -- reads the first line and detects tabs or commas`} - -var _DelimsMap = map[Delims]string{0: `Tab`, 1: `Comma`, 2: `Space`, 3: `Detect`} - -// String returns the string representation of this Delims value. -func (i Delims) String() string { return enums.String(i, _DelimsMap) } - -// SetString sets the Delims value from its string representation, -// and returns an error if the string is invalid. -func (i *Delims) SetString(s string) error { return enums.SetString(i, s, _DelimsValueMap, "Delims") } - -// Int64 returns the Delims value as an int64. -func (i Delims) Int64() int64 { return int64(i) } - -// SetInt64 sets the Delims value from an int64. -func (i *Delims) SetInt64(in int64) { *i = Delims(in) } - -// Desc returns the description of the Delims value. -func (i Delims) Desc() string { return enums.Desc(i, _DelimsDescMap) } - -// DelimsValues returns all possible values for the type Delims. -func DelimsValues() []Delims { return _DelimsValues } - -// Values returns all possible values for the type Delims. -func (i Delims) Values() []enums.Enum { return enums.Values(_DelimsValues) } - -// MarshalText implements the [encoding.TextMarshaler] interface. -func (i Delims) MarshalText() ([]byte, error) { return []byte(i.String()), nil } - -// UnmarshalText implements the [encoding.TextUnmarshaler] interface. -func (i *Delims) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Delims") } diff --git a/tensor/table/indexview.go b/tensor/table/indexview.go deleted file mode 100644 index 5a8da3e570..0000000000 --- a/tensor/table/indexview.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package table - -import ( - "fmt" - "log" - "math/rand" - "slices" - "sort" - "strings" -) - -// LessFunc is a function used for sort comparisons that returns -// true if Table row i is less than Table row j -- these are the -// raw row numbers, which have already been projected through -// indexes when used for sorting via Indexes. -type LessFunc func(et *Table, i, j int) bool - -// Filterer is a function used for filtering that returns -// true if Table row should be included in the current filtered -// view of the table, and false if it should be removed. -type Filterer func(et *Table, row int) bool - -// IndexView is an indexed wrapper around an table.Table that provides a -// specific view onto the Table defined by the set of indexes. -// This provides an efficient way of sorting and filtering a table by only -// updating the indexes while doing nothing to the Table itself. -// To produce a table that has data actually organized according to the -// indexed order, call the NewTable method. -// IndexView views on a table can also be organized together as Splits -// of the table rows, e.g., by grouping values along a given column. 
-type IndexView struct { //types:add - - // Table that we are an indexed view onto - Table *Table - - // current indexes into Table - Indexes []int - - // current Less function used in sorting - lessFunc LessFunc `copier:"-" display:"-" xml:"-" json:"-"` -} - -// NewIndexView returns a new IndexView based on given table, initialized with sequential idxes -func NewIndexView(et *Table) *IndexView { - ix := &IndexView{} - ix.SetTable(et) - return ix -} - -// SetTable sets as indexes into given table with sequential initial indexes -func (ix *IndexView) SetTable(et *Table) { - ix.Table = et - ix.Sequential() -} - -// DeleteInvalid deletes all invalid indexes from the list. -// Call this if rows (could) have been deleted from table. -func (ix *IndexView) DeleteInvalid() { - if ix.Table == nil || ix.Table.Rows <= 0 { - ix.Indexes = nil - return - } - ni := ix.Len() - for i := ni - 1; i >= 0; i-- { - if ix.Indexes[i] >= ix.Table.Rows { - ix.Indexes = append(ix.Indexes[:i], ix.Indexes[i+1:]...) - } - } -} - -// Sequential sets indexes to sequential row-wise indexes into table -func (ix *IndexView) Sequential() { //types:add - if ix.Table == nil || ix.Table.Rows <= 0 { - ix.Indexes = nil - return - } - ix.Indexes = make([]int, ix.Table.Rows) - for i := range ix.Indexes { - ix.Indexes[i] = i - } -} - -// Permuted sets indexes to a permuted order -- if indexes already exist -// then existing list of indexes is permuted, otherwise a new set of -// permuted indexes are generated -func (ix *IndexView) Permuted() { - if ix.Table == nil || ix.Table.Rows <= 0 { - ix.Indexes = nil - return - } - if len(ix.Indexes) == 0 { - ix.Indexes = rand.Perm(ix.Table.Rows) - } else { - rand.Shuffle(len(ix.Indexes), func(i, j int) { - ix.Indexes[i], ix.Indexes[j] = ix.Indexes[j], ix.Indexes[i] - }) - } -} - -// AddIndex adds a new index to the list -func (ix *IndexView) AddIndex(idx int) { - ix.Indexes = append(ix.Indexes, idx) -} - -// Sort sorts the indexes into our Table using given Less function. -// The Less function operates directly on row numbers into the Table -// as these row numbers have already been projected through the indexes. -func (ix *IndexView) Sort(lessFunc func(et *Table, i, j int) bool) { - ix.lessFunc = lessFunc - sort.Sort(ix) -} - -// SortIndexes sorts the indexes into our Table directly in -// numerical order, producing the native ordering, while preserving -// any filtering that might have occurred. -func (ix *IndexView) SortIndexes() { - sort.Ints(ix.Indexes) -} - -const ( - // Ascending specifies an ascending sort direction for table Sort routines - Ascending = true - - // Descending specifies a descending sort direction for table Sort routines - Descending = false -) - -// SortColumnName sorts the indexes into our Table according to values in -// given column name, using either ascending or descending order. -// Only valid for 1-dimensional columns. -// Returns error if column name not found. -func (ix *IndexView) SortColumnName(column string, ascending bool) error { //types:add - ci, err := ix.Table.ColumnIndex(column) - if err != nil { - log.Println(err) - return err - } - ix.SortColumn(ci, ascending) - return nil -} - -// SortColumn sorts the indexes into our Table according to values in -// given column index, using either ascending or descending order. -// Only valid for 1-dimensional columns. 
-func (ix *IndexView) SortColumn(colIndex int, ascending bool) { - cl := ix.Table.Columns[colIndex] - if cl.IsString() { - ix.Sort(func(et *Table, i, j int) bool { - if ascending { - return cl.String1D(i) < cl.String1D(j) - } else { - return cl.String1D(i) > cl.String1D(j) - } - }) - } else { - ix.Sort(func(et *Table, i, j int) bool { - if ascending { - return cl.Float1D(i) < cl.Float1D(j) - } else { - return cl.Float1D(i) > cl.Float1D(j) - } - }) - } -} - -// SortColumnNames sorts the indexes into our Table according to values in -// given column names, using either ascending or descending order. -// Only valid for 1-dimensional columns. -// Returns error if column name not found. -func (ix *IndexView) SortColumnNames(columns []string, ascending bool) error { - nc := len(columns) - if nc == 0 { - return fmt.Errorf("table.IndexView.SortColumnNames: no column names provided") - } - cis := make([]int, nc) - for i, cn := range columns { - ci, err := ix.Table.ColumnIndex(cn) - if err != nil { - log.Println(err) - return err - } - cis[i] = ci - } - ix.SortColumns(cis, ascending) - return nil -} - -// SortColumns sorts the indexes into our Table according to values in -// given list of column indexes, using either ascending or descending order for -// all of the columns. Only valid for 1-dimensional columns. -func (ix *IndexView) SortColumns(colIndexes []int, ascending bool) { - ix.Sort(func(et *Table, i, j int) bool { - for _, ci := range colIndexes { - cl := ix.Table.Columns[ci] - if cl.IsString() { - if ascending { - if cl.String1D(i) < cl.String1D(j) { - return true - } else if cl.String1D(i) > cl.String1D(j) { - return false - } // if equal, fallthrough to next col - } else { - if cl.String1D(i) > cl.String1D(j) { - return true - } else if cl.String1D(i) < cl.String1D(j) { - return false - } // if equal, fallthrough to next col - } - } else { - if ascending { - if cl.Float1D(i) < cl.Float1D(j) { - return true - } else if cl.Float1D(i) > cl.Float1D(j) { - return false - } // if equal, fallthrough to next col - } else { - if cl.Float1D(i) > cl.Float1D(j) { - return true - } else if cl.Float1D(i) < cl.Float1D(j) { - return false - } // if equal, fallthrough to next col - } - } - } - return false - }) -} - -///////////////////////////////////////////////////////////////////////// -// Stable sorts -- sometimes essential.. - -// SortStable stably sorts the indexes into our Table using given Less function. -// The Less function operates directly on row numbers into the Table -// as these row numbers have already been projected through the indexes. -// It is *essential* that it always returns false when the two are equal -// for the stable function to actually work. -func (ix *IndexView) SortStable(lessFunc func(et *Table, i, j int) bool) { - ix.lessFunc = lessFunc - sort.Stable(ix) -} - -// SortStableColumnName sorts the indexes into our Table according to values in -// given column name, using either ascending or descending order. -// Only valid for 1-dimensional columns. -// Returns error if column name not found. -func (ix *IndexView) SortStableColumnName(column string, ascending bool) error { - ci, err := ix.Table.ColumnIndex(column) - if err != nil { - log.Println(err) - return err - } - ix.SortStableColumn(ci, ascending) - return nil -} - -// SortStableColumn sorts the indexes into our Table according to values in -// given column index, using either ascending or descending order. -// Only valid for 1-dimensional columns. 
-func (ix *IndexView) SortStableColumn(colIndex int, ascending bool) { - cl := ix.Table.Columns[colIndex] - if cl.IsString() { - ix.SortStable(func(et *Table, i, j int) bool { - if ascending { - return cl.String1D(i) < cl.String1D(j) - } else { - return cl.String1D(i) > cl.String1D(j) - } - }) - } else { - ix.SortStable(func(et *Table, i, j int) bool { - if ascending { - return cl.Float1D(i) < cl.Float1D(j) - } else { - return cl.Float1D(i) > cl.Float1D(j) - } - }) - } -} - -// SortStableColumnNames sorts the indexes into our Table according to values in -// given column names, using either ascending or descending order. -// Only valid for 1-dimensional columns. -// Returns error if column name not found. -func (ix *IndexView) SortStableColumnNames(columns []string, ascending bool) error { - nc := len(columns) - if nc == 0 { - return fmt.Errorf("table.IndexView.SortStableColumnNames: no column names provided") - } - cis := make([]int, nc) - for i, cn := range columns { - ci, err := ix.Table.ColumnIndex(cn) - if err != nil { - log.Println(err) - return err - } - cis[i] = ci - } - ix.SortStableColumns(cis, ascending) - return nil -} - -// SortStableColumns sorts the indexes into our Table according to values in -// given list of column indexes, using either ascending or descending order for -// all of the columns. Only valid for 1-dimensional columns. -func (ix *IndexView) SortStableColumns(colIndexes []int, ascending bool) { - ix.SortStable(func(et *Table, i, j int) bool { - for _, ci := range colIndexes { - cl := ix.Table.Columns[ci] - if cl.IsString() { - if ascending { - if cl.String1D(i) < cl.String1D(j) { - return true - } else if cl.String1D(i) > cl.String1D(j) { - return false - } // if equal, fallthrough to next col - } else { - if cl.String1D(i) > cl.String1D(j) { - return true - } else if cl.String1D(i) < cl.String1D(j) { - return false - } // if equal, fallthrough to next col - } - } else { - if ascending { - if cl.Float1D(i) < cl.Float1D(j) { - return true - } else if cl.Float1D(i) > cl.Float1D(j) { - return false - } // if equal, fallthrough to next col - } else { - if cl.Float1D(i) > cl.Float1D(j) { - return true - } else if cl.Float1D(i) < cl.Float1D(j) { - return false - } // if equal, fallthrough to next col - } - } - } - return false - }) -} - -// Filter filters the indexes into our Table using given Filter function. -// The Filter function operates directly on row numbers into the Table -// as these row numbers have already been projected through the indexes. -func (ix *IndexView) Filter(filterer func(et *Table, row int) bool) { - sz := len(ix.Indexes) - for i := sz - 1; i >= 0; i-- { // always go in reverse for filtering - if !filterer(ix.Table, ix.Indexes[i]) { // delete - ix.Indexes = append(ix.Indexes[:i], ix.Indexes[i+1:]...) - } - } -} - -// FilterColumnName filters the indexes into our Table according to values in -// given column name, using string representation of column values. -// Includes rows with matching values unless exclude is set. -// If contains, only checks if row contains string; if ignoreCase, ignores case. -// Use named args for greater clarity. -// Only valid for 1-dimensional columns. -// Returns error if column name not found. 
-func (ix *IndexView) FilterColumnName(column string, str string, exclude, contains, ignoreCase bool) error { //types:add - ci, err := ix.Table.ColumnIndex(column) - if err != nil { - log.Println(err) - return err - } - ix.FilterColumn(ci, str, exclude, contains, ignoreCase) - return nil -} - -// FilterColumn sorts the indexes into our Table according to values in -// given column index, using string representation of column values. -// Includes rows with matching values unless exclude is set. -// If contains, only checks if row contains string; if ignoreCase, ignores case. -// Use named args for greater clarity. -// Only valid for 1-dimensional columns. -func (ix *IndexView) FilterColumn(colIndex int, str string, exclude, contains, ignoreCase bool) { - col := ix.Table.Columns[colIndex] - lowstr := strings.ToLower(str) - ix.Filter(func(et *Table, row int) bool { - val := col.String1D(row) - has := false - switch { - case contains && ignoreCase: - has = strings.Contains(strings.ToLower(val), lowstr) - case contains: - has = strings.Contains(val, str) - case ignoreCase: - has = strings.EqualFold(val, str) - default: - has = (val == str) - } - if exclude { - return !has - } - return has - }) -} - -// NewTable returns a new table with column data organized according to -// the indexes -func (ix *IndexView) NewTable() *Table { - rows := len(ix.Indexes) - nt := ix.Table.Clone() - nt.SetNumRows(rows) - if rows == 0 { - return nt - } - for ci := range nt.Columns { - scl := ix.Table.Columns[ci] - tcl := nt.Columns[ci] - _, csz := tcl.RowCellSize() - for i, srw := range ix.Indexes { - tcl.CopyCellsFrom(scl, i*csz, srw*csz, csz) - } - } - return nt -} - -// Clone returns a copy of the current index view with its own index memory -func (ix *IndexView) Clone() *IndexView { - nix := &IndexView{} - nix.CopyFrom(ix) - return nix -} - -// CopyFrom copies from given other IndexView (we have our own unique copy of indexes) -func (ix *IndexView) CopyFrom(oix *IndexView) { - ix.Table = oix.Table - ix.Indexes = slices.Clone(oix.Indexes) -} - -// AddRows adds n rows to end of underlying Table, and to the indexes in this view -func (ix *IndexView) AddRows(n int) { //types:add - stidx := ix.Table.Rows - ix.Table.SetNumRows(stidx + n) - for i := stidx; i < stidx+n; i++ { - ix.Indexes = append(ix.Indexes, i) - } -} - -// InsertRows adds n rows to end of underlying Table, and to the indexes starting at -// given index in this view -func (ix *IndexView) InsertRows(at, n int) { - stidx := ix.Table.Rows - ix.Table.SetNumRows(stidx + n) - nw := make([]int, n, n+len(ix.Indexes)-at) - for i := 0; i < n; i++ { - nw[i] = stidx + i - } - ix.Indexes = append(ix.Indexes[:at], append(nw, ix.Indexes[at:]...)...) -} - -// DeleteRows deletes n rows of indexes starting at given index in the list of indexes -func (ix *IndexView) DeleteRows(at, n int) { - ix.Indexes = append(ix.Indexes[:at], ix.Indexes[at+n:]...) -} - -// RowsByStringIndex returns the list of *our indexes* whose row in the table has -// given string value in given column index (de-reference our indexes to get actual row). -// if contains, only checks if row contains string; if ignoreCase, ignores case. -// Use named args for greater clarity. 
-func (ix *IndexView) RowsByStringIndex(colIndex int, str string, contains, ignoreCase bool) []int { - dt := ix.Table - col := dt.Columns[colIndex] - lowstr := strings.ToLower(str) - var idxs []int - for idx, srw := range ix.Indexes { - val := col.String1D(srw) - has := false - switch { - case contains && ignoreCase: - has = strings.Contains(strings.ToLower(val), lowstr) - case contains: - has = strings.Contains(val, str) - case ignoreCase: - has = strings.EqualFold(val, str) - default: - has = (val == str) - } - if has { - idxs = append(idxs, idx) - } - } - return idxs -} - -// RowsByString returns the list of *our indexes* whose row in the table has -// given string value in given column name (de-reference our indexes to get actual row). -// if contains, only checks if row contains string; if ignoreCase, ignores case. -// returns error message for invalid column name. -// Use named args for greater clarity. -func (ix *IndexView) RowsByString(column string, str string, contains, ignoreCase bool) ([]int, error) { - dt := ix.Table - ci, err := dt.ColumnIndex(column) - if err != nil { - return nil, err - } - return ix.RowsByStringIndex(ci, str, contains, ignoreCase), nil -} - -// Len returns the length of the index list -func (ix *IndexView) Len() int { - return len(ix.Indexes) -} - -// Less calls the LessFunc for sorting -func (ix *IndexView) Less(i, j int) bool { - return ix.lessFunc(ix.Table, ix.Indexes[i], ix.Indexes[j]) -} - -// Swap switches the indexes for i and j -func (ix *IndexView) Swap(i, j int) { - ix.Indexes[i], ix.Indexes[j] = ix.Indexes[j], ix.Indexes[i] -} diff --git a/tensor/table/io.go b/tensor/table/io.go deleted file mode 100644 index c5e0395cfd..0000000000 --- a/tensor/table/io.go +++ /dev/null @@ -1,569 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package table - -import ( - "bufio" - "encoding/csv" - "fmt" - "io" - "io/fs" - "log" - "math" - "os" - "reflect" - "strconv" - "strings" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/core" - "cogentcore.org/core/tensor" -) - -// Delim are standard CSV delimiter options (Tab, Comma, Space) -type Delims int32 //enums:enum - -const ( - // Tab is the tab rune delimiter, for TSV tab separated values - Tab Delims = iota - - // Comma is the comma rune delimiter, for CSV comma separated values - Comma - - // Space is the space rune delimiter, for SSV space separated value - Space - - // Detect is used during reading a file -- reads the first line and detects tabs or commas - Detect -) - -func (dl Delims) Rune() rune { - switch dl { - case Tab: - return '\t' - case Comma: - return ',' - case Space: - return ' ' - } - return '\t' -} - -const ( - // Headers is passed to CSV methods for the headers arg, to use headers - // that capture full type and tensor shape information. - Headers = true - - // NoHeaders is passed to CSV methods for the headers arg, to not use headers - NoHeaders = false -) - -// SaveCSV writes a table to a comma-separated-values (CSV) file -// (where comma = any delimiter, specified in the delim arg). -// If headers = true then generate column headers that capture the type -// and tensor cell geometry of the columns, enabling full reloading -// of exactly the same table format and data (recommended). -// Otherwise, only the data is written. 
-func (dt *Table) SaveCSV(filename core.Filename, delim Delims, headers bool) error { //types:add - fp, err := os.Create(string(filename)) - defer fp.Close() - if err != nil { - log.Println(err) - return err - } - bw := bufio.NewWriter(fp) - err = dt.WriteCSV(bw, delim, headers) - bw.Flush() - return err -} - -// SaveCSV writes a table index view to a comma-separated-values (CSV) file -// (where comma = any delimiter, specified in the delim arg). -// If headers = true then generate column headers that capture the type -// and tensor cell geometry of the columns, enabling full reloading -// of exactly the same table format and data (recommended). -// Otherwise, only the data is written. -func (ix *IndexView) SaveCSV(filename core.Filename, delim Delims, headers bool) error { //types:add - fp, err := os.Create(string(filename)) - defer fp.Close() - if err != nil { - log.Println(err) - return err - } - bw := bufio.NewWriter(fp) - err = ix.WriteCSV(bw, delim, headers) - bw.Flush() - return err -} - -// OpenCSV reads a table from a comma-separated-values (CSV) file -// (where comma = any delimiter, specified in the delim arg), -// using the Go standard encoding/csv reader conforming to the official CSV standard. -// If the table does not currently have any columns, the first row of the file -// is assumed to be headers, and columns are constructed therefrom. -// If the file was saved from table with headers, then these have full configuration -// information for tensor type and dimensionality. -// If the table DOES have existing columns, then those are used robustly -// for whatever information fits from each row of the file. -func (dt *Table) OpenCSV(filename core.Filename, delim Delims) error { //types:add - fp, err := os.Open(string(filename)) - if err != nil { - return errors.Log(err) - } - defer fp.Close() - return dt.ReadCSV(bufio.NewReader(fp), delim) -} - -// OpenFS is the version of [Table.OpenCSV] that uses an [fs.FS] filesystem. -func (dt *Table) OpenFS(fsys fs.FS, filename string, delim Delims) error { - fp, err := fsys.Open(filename) - if err != nil { - return errors.Log(err) - } - defer fp.Close() - return dt.ReadCSV(bufio.NewReader(fp), delim) -} - -// OpenCSV reads a table idx view from a comma-separated-values (CSV) file -// (where comma = any delimiter, specified in the delim arg), -// using the Go standard encoding/csv reader conforming to the official CSV standard. -// If the table does not currently have any columns, the first row of the file -// is assumed to be headers, and columns are constructed therefrom. -// If the file was saved from table with headers, then these have full configuration -// information for tensor type and dimensionality. -// If the table DOES have existing columns, then those are used robustly -// for whatever information fits from each row of the file. -func (ix *IndexView) OpenCSV(filename core.Filename, delim Delims) error { //types:add - err := ix.Table.OpenCSV(filename, delim) - ix.Sequential() - return err -} - -// OpenFS is the version of [IndexView.OpenCSV] that uses an [fs.FS] filesystem. -func (ix *IndexView) OpenFS(fsys fs.FS, filename string, delim Delims) error { - err := ix.Table.OpenFS(fsys, filename, delim) - ix.Sequential() - return err -} - -// ReadCSV reads a table from a comma-separated-values (CSV) file -// (where comma = any delimiter, specified in the delim arg), -// using the Go standard encoding/csv reader conforming to the official CSV standard. 
-// If the table does not currently have any columns, the first row of the file -// is assumed to be headers, and columns are constructed therefrom. -// If the file was saved from table with headers, then these have full configuration -// information for tensor type and dimensionality. -// If the table DOES have existing columns, then those are used robustly -// for whatever information fits from each row of the file. -func (dt *Table) ReadCSV(r io.Reader, delim Delims) error { - cr := csv.NewReader(r) - cr.Comma = delim.Rune() - rec, err := cr.ReadAll() // todo: lazy, avoid resizing - if err != nil || len(rec) == 0 { - return err - } - rows := len(rec) - // cols := len(rec[0]) - strow := 0 - if dt.NumColumns() == 0 || DetectTableHeaders(rec[0]) { - dt.DeleteAll() - err := ConfigFromHeaders(dt, rec[0], rec) - if err != nil { - log.Println(err.Error()) - return err - } - strow++ - rows-- - } - dt.SetNumRows(rows) - for ri := 0; ri < rows; ri++ { - dt.ReadCSVRow(rec[ri+strow], ri) - } - return nil -} - -// ReadCSVRow reads a record of CSV data into given row in table -func (dt *Table) ReadCSVRow(rec []string, row int) { - tc := dt.NumColumns() - ci := 0 - if rec[0] == "_D:" { // data row - ci++ - } - nan := math.NaN() - for j := 0; j < tc; j++ { - tsr := dt.Columns[j] - _, csz := tsr.RowCellSize() - stoff := row * csz - for cc := 0; cc < csz; cc++ { - str := rec[ci] - if !tsr.IsString() { - if str == "" || str == "NaN" || str == "-NaN" || str == "Inf" || str == "-Inf" { - tsr.SetFloat1D(stoff+cc, nan) - } else { - tsr.SetString1D(stoff+cc, strings.TrimSpace(str)) - } - } else { - tsr.SetString1D(stoff+cc, strings.TrimSpace(str)) - } - ci++ - if ci >= len(rec) { - return - } - } - } -} - -// ConfigFromHeaders attempts to configure Table based on the headers. -// for non-table headers, data is examined to determine types. 
-func ConfigFromHeaders(dt *Table, hdrs []string, rec [][]string) error { - if DetectTableHeaders(hdrs) { - return ConfigFromTableHeaders(dt, hdrs) - } - return ConfigFromDataValues(dt, hdrs, rec) -} - -// DetectTableHeaders looks for special header characters -- returns true if found -func DetectTableHeaders(hdrs []string) bool { - for _, hd := range hdrs { - hd = strings.TrimSpace(hd) - if hd == "" { - continue - } - if hd == "_H:" { - return true - } - if _, ok := TableHeaderToType[hd[0]]; !ok { // all must be table - return false - } - } - return true -} - -// ConfigFromTableHeaders attempts to configure a Table based on special table headers -func ConfigFromTableHeaders(dt *Table, hdrs []string) error { - for _, hd := range hdrs { - hd = strings.TrimSpace(hd) - if hd == "" || hd == "_H:" { - continue - } - typ, hd := TableColumnType(hd) - dimst := strings.Index(hd, "]<") - if dimst > 0 { - dims := hd[dimst+2 : len(hd)-1] - lbst := strings.Index(hd, "[") - hd = hd[:lbst] - csh := ShapeFromString(dims) - // new tensor starting - dt.AddTensorColumnOfType(typ, hd, csh, "Row") - continue - } - dimst = strings.Index(hd, "[") - if dimst > 0 { - continue - } - dt.AddColumnOfType(typ, hd) - } - return nil -} - -// TableHeaderToType maps special header characters to data type -var TableHeaderToType = map[byte]reflect.Kind{ - '$': reflect.String, - '%': reflect.Float32, - '#': reflect.Float64, - '|': reflect.Int, - '^': reflect.Bool, -} - -// TableHeaderChar returns the special header character based on given data type -func TableHeaderChar(typ reflect.Kind) byte { - switch { - case typ == reflect.Bool: - return '^' - case typ == reflect.Float32: - return '%' - case typ == reflect.Float64: - return '#' - case typ >= reflect.Int && typ <= reflect.Uintptr: - return '|' - default: - return '$' - } -} - -// TableColumnType parses the column header for special table type information -func TableColumnType(nm string) (reflect.Kind, string) { - typ, ok := TableHeaderToType[nm[0]] - if ok { - nm = nm[1:] - } else { - typ = reflect.String // most general, default - } - return typ, nm -} - -// ShapeFromString parses string representation of shape as N:d,d,.. -func ShapeFromString(dims string) []int { - clni := strings.Index(dims, ":") - nd, _ := strconv.Atoi(dims[:clni]) - sh := make([]int, nd) - ci := clni + 1 - for i := 0; i < nd; i++ { - dstr := "" - if i < nd-1 { - nci := strings.Index(dims[ci:], ",") - dstr = dims[ci : ci+nci] - ci += nci + 1 - } else { - dstr = dims[ci:] - } - d, _ := strconv.Atoi(dstr) - sh[i] = d - } - return sh -} - -// ConfigFromDataValues configures a Table based on data types inferred -// from the string representation of given records, using header names if present. 
-func ConfigFromDataValues(dt *Table, hdrs []string, rec [][]string) error { - nr := len(rec) - for ci, hd := range hdrs { - hd = strings.TrimSpace(hd) - if hd == "" { - hd = fmt.Sprintf("col_%d", ci) - } - nmatch := 0 - typ := reflect.String - for ri := 1; ri < nr; ri++ { - rv := rec[ri][ci] - if rv == "" { - continue - } - ctyp := InferDataType(rv) - switch { - case ctyp == reflect.String: // definitive - typ = ctyp - break - case typ == ctyp && (nmatch > 1 || ri == nr-1): // good enough - break - case typ == ctyp: // gather more info - nmatch++ - case typ == reflect.String: // always upgrade from string default - nmatch = 0 - typ = ctyp - case typ == reflect.Int && ctyp == reflect.Float64: // upgrade - nmatch = 0 - typ = ctyp - } - } - dt.AddColumnOfType(typ, hd) - } - return nil -} - -// InferDataType returns the inferred data type for the given string -// only deals with float64, int, and string types -func InferDataType(str string) reflect.Kind { - if strings.Contains(str, ".") { - _, err := strconv.ParseFloat(str, 64) - if err == nil { - return reflect.Float64 - } - } - _, err := strconv.ParseInt(str, 10, 64) - if err == nil { - return reflect.Int - } - // try float again just in case.. - _, err = strconv.ParseFloat(str, 64) - if err == nil { - return reflect.Float64 - } - return reflect.String -} - -////////////////////////////////////////////////////////////////////////// -// WriteCSV - -// WriteCSV writes a table to a comma-separated-values (CSV) file -// (where comma = any delimiter, specified in the delim arg). -// If headers = true then generate column headers that capture the type -// and tensor cell geometry of the columns, enabling full reloading -// of exactly the same table format and data (recommended). -// Otherwise, only the data is written. -func (dt *Table) WriteCSV(w io.Writer, delim Delims, headers bool) error { - ncol := 0 - var err error - if headers { - ncol, err = dt.WriteCSVHeaders(w, delim) - if err != nil { - log.Println(err) - return err - } - } - cw := csv.NewWriter(w) - cw.Comma = delim.Rune() - for ri := 0; ri < dt.Rows; ri++ { - err = dt.WriteCSVRowWriter(cw, ri, ncol) - if err != nil { - log.Println(err) - return err - } - } - cw.Flush() - return nil -} - -// WriteCSV writes only rows in table idx view to a comma-separated-values (CSV) file -// (where comma = any delimiter, specified in the delim arg). -// If headers = true then generate column headers that capture the type -// and tensor cell geometry of the columns, enabling full reloading -// of exactly the same table format and data (recommended). -// Otherwise, only the data is written. -func (ix *IndexView) WriteCSV(w io.Writer, delim Delims, headers bool) error { - ncol := 0 - var err error - if headers { - ncol, err = ix.Table.WriteCSVHeaders(w, delim) - if err != nil { - log.Println(err) - return err - } - } - cw := csv.NewWriter(w) - cw.Comma = delim.Rune() - nrow := ix.Len() - for ri := 0; ri < nrow; ri++ { - err = ix.Table.WriteCSVRowWriter(cw, ix.Indexes[ri], ncol) - if err != nil { - log.Println(err) - return err - } - } - cw.Flush() - return nil -} - -// WriteCSVHeaders writes headers to a comma-separated-values (CSV) file -// (where comma = any delimiter, specified in the delim arg). 
-// Returns number of columns in header -func (dt *Table) WriteCSVHeaders(w io.Writer, delim Delims) (int, error) { - cw := csv.NewWriter(w) - cw.Comma = delim.Rune() - hdrs := dt.TableHeaders() - nc := len(hdrs) - err := cw.Write(hdrs) - if err != nil { - return nc, err - } - cw.Flush() - return nc, nil -} - -// WriteCSVRow writes given row to a comma-separated-values (CSV) file -// (where comma = any delimiter, specified in the delim arg) -func (dt *Table) WriteCSVRow(w io.Writer, row int, delim Delims) error { - cw := csv.NewWriter(w) - cw.Comma = delim.Rune() - err := dt.WriteCSVRowWriter(cw, row, 0) - cw.Flush() - return err -} - -// WriteCSVRowWriter uses csv.Writer to write one row -func (dt *Table) WriteCSVRowWriter(cw *csv.Writer, row int, ncol int) error { - prec := -1 - if ps, ok := dt.MetaData["precision"]; ok { - prec, _ = strconv.Atoi(ps) - } - var rec []string - if ncol > 0 { - rec = make([]string, 0, ncol) - } else { - rec = make([]string, 0) - } - rc := 0 - for i := range dt.Columns { - tsr := dt.Columns[i] - nd := tsr.NumDims() - if nd == 1 { - vl := "" - if prec <= 0 || tsr.IsString() { - vl = tsr.String1D(row) - } else { - vl = strconv.FormatFloat(tsr.Float1D(row), 'g', prec, 64) - } - if len(rec) <= rc { - rec = append(rec, vl) - } else { - rec[rc] = vl - } - rc++ - } else { - csh := tensor.NewShape(tsr.Shape().Sizes[1:]) // cell shape - tc := csh.Len() - for ti := 0; ti < tc; ti++ { - vl := "" - if prec <= 0 || tsr.IsString() { - vl = tsr.String1D(row*tc + ti) - } else { - vl = strconv.FormatFloat(tsr.Float1D(row*tc+ti), 'g', prec, 64) - } - if len(rec) <= rc { - rec = append(rec, vl) - } else { - rec[rc] = vl - } - rc++ - } - } - } - err := cw.Write(rec) - return err -} - -// TableHeaders generates special header strings from the table -// with full information about type and tensor cell dimensionality. -func (dt *Table) TableHeaders() []string { - hdrs := []string{} - for i := range dt.Columns { - tsr := dt.Columns[i] - nm := dt.ColumnNames[i] - nm = string([]byte{TableHeaderChar(tsr.DataType())}) + nm - if tsr.NumDims() == 1 { - hdrs = append(hdrs, nm) - } else { - csh := tensor.NewShape(tsr.Shape().Sizes[1:]) // cell shape - tc := csh.Len() - nd := csh.NumDims() - fnm := nm + fmt.Sprintf("[%v:", nd) - dn := fmt.Sprintf("<%v:", nd) - ffnm := fnm - for di := 0; di < nd; di++ { - ffnm += "0" - dn += fmt.Sprintf("%v", csh.DimSize(di)) - if di < nd-1 { - ffnm += "," - dn += "," - } - } - ffnm += "]" + dn + ">" - hdrs = append(hdrs, ffnm) - for ti := 1; ti < tc; ti++ { - idx := csh.Index(ti) - ffnm := fnm - for di := 0; di < nd; di++ { - ffnm += fmt.Sprintf("%v", idx[di]) - if di < nd-1 { - ffnm += "," - } - } - ffnm += "]" - hdrs = append(hdrs, ffnm) - } - } - } - return hdrs -} diff --git a/tensor/table/io_test.go b/tensor/table/io_test.go deleted file mode 100644 index 925fc9a2e7..0000000000 --- a/tensor/table/io_test.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package table - -import ( - "os" - "reflect" - "strings" - "testing" -) - -func TestTableHeaders(t *testing.T) { - hdrstr := `$Name %Input[2:0,0]<2:5,5> %Input[2:1,0] %Input[2:2,0] %Input[2:3,0] %Input[2:4,0] %Input[2:0,1] %Input[2:1,1] %Input[2:2,1] %Input[2:3,1] %Input[2:4,1] %Input[2:0,2] %Input[2:1,2] %Input[2:2,2] %Input[2:3,2] %Input[2:4,2] %Input[2:0,3] %Input[2:1,3] %Input[2:2,3] %Input[2:3,3] %Input[2:4,3] %Input[2:0,4] %Input[2:1,4] %Input[2:2,4] %Input[2:3,4] %Input[2:4,4] %Output[2:0,0]<2:5,5> %Output[2:1,0] %Output[2:2,0] %Output[2:3,0] %Output[2:4,0] %Output[2:0,1] %Output[2:1,1] %Output[2:2,1] %Output[2:3,1] %Output[2:4,1] %Output[2:0,2] %Output[2:1,2] %Output[2:2,2] %Output[2:3,2] %Output[2:4,2] %Output[2:0,3] %Output[2:1,3] %Output[2:2,3] %Output[2:3,3] %Output[2:4,3] %Output[2:0,4] %Output[2:1,4] %Output[2:2,4] %Output[2:3,4] %Output[2:4,4] ` - - hdrs := strings.Split(hdrstr, "\t") - dt := NewTable() - err := ConfigFromHeaders(dt, hdrs, nil) - if err != nil { - t.Error(err) - } - // fmt.Printf("schema:\n%v\n", sc) - if dt.NumColumns() != 3 { - t.Errorf("TableHeaders: len != 3\n") - } - if dt.Columns[0].DataType() != reflect.String { - t.Errorf("TableHeaders: dt.Columns[0] != STRING\n") - } - if dt.Columns[1].DataType() != reflect.Float32 { - t.Errorf("TableHeaders: dt.Columns[1] != FLOAT32\n") - } - if dt.Columns[2].DataType() != reflect.Float32 { - t.Errorf("TableHeaders: dt.Columns[2] != FLOAT32\n") - } - if dt.Columns[1].Shape().Sizes[1] != 5 { - t.Errorf("TableHeaders: dt.Columns[1].Shape().Sizes[1] != 5\n") - } - if dt.Columns[1].Shape().Sizes[2] != 5 { - t.Errorf("TableHeaders: dt.Columns[1].Shape().Sizes[2] != 5\n") - } - if dt.Columns[2].Shape().Sizes[1] != 5 { - t.Errorf("TableHeaders: dt.Columns[2].Shape().Sizes[1] != 5\n") - } - if dt.Columns[2].Shape().Sizes[2] != 5 { - t.Errorf("TableHeaders: dt.Columns[2].Shape().Sizes[2] != 5\n") - } - outh := dt.TableHeaders() - // fmt.Printf("headers out:\n%v\n", outh) - for i := 0; i < 2; i++ { // note: due to diff row-major index ordering, other cols are diff.. 
- hh := hdrs[i] - oh := outh[i] - if hh != oh { - t.Errorf("TableHeaders: hdr %v mismatch %v != %v\n", i, hh, oh) - } - } - if hdrs[26] != outh[26] { - t.Errorf("TableHeaders: hdr %v mismatch %v != %v\n", 26, hdrs[26], outh[26]) - } -} - -func TestReadTableDat(t *testing.T) { - for i := 0; i < 2; i++ { - fp, err := os.Open("testdata/emer_simple_lines_5x5.dat") - defer fp.Close() - if err != nil { - t.Error(err) - } - dt := &Table{} - err = dt.ReadCSV(fp, '\t') // tsv - if err != nil { - t.Error(err) - } - sc := dt.Columns - if len(sc) != 3 { - t.Errorf("TableHeaders: len != 3\n") - } - if sc[0].DataType() != reflect.String { - t.Errorf("TableHeaders: sc[0] != STRING\n") - } - if sc[1].DataType() != reflect.Float32 { - t.Errorf("TableHeaders: sc[1] != FLOAT32\n") - } - if sc[2].DataType() != reflect.Float32 { - t.Errorf("TableHeaders: sc[2] != FLOAT32\n") - } - if sc[1].Shape().DimSize(0) != 6 { - t.Errorf("TableHeaders: sc[1].Dim[0] != 6 = %v\n", sc[1].Shape().DimSize(0)) - } - if sc[1].Shape().DimSize(1) != 5 { - t.Errorf("TableHeaders: sc[1].Dim[1] != 5\n") - } - if sc[2].Shape().DimSize(0) != 6 { - t.Errorf("TableHeaders: sc[2].Dim[0] != 6 = %v\n", sc[2].Shape().DimSize(0)) - } - if sc[2].Shape().DimSize(1) != 5 { - t.Errorf("TableHeaders: sc[2].Dim[1] != 5\n") - } - fo, err := os.Create("testdata/emer_simple_lines_5x5_rec.dat") - defer fo.Close() - if err != nil { - t.Error(err) - } - dt.WriteCSV(fo, '\t', Headers) - } -} diff --git a/tensor/table/slicetable.go b/tensor/table/slicetable.go deleted file mode 100644 index 7587ab6d23..0000000000 --- a/tensor/table/slicetable.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package table - -import ( - "fmt" - "reflect" - - "cogentcore.org/core/base/reflectx" -) - -// NewSliceTable returns a new Table with data from the given slice -// of structs. 
-func NewSliceTable(st any) (*Table, error) { - npv := reflectx.NonPointerValue(reflect.ValueOf(st)) - if npv.Kind() != reflect.Slice { - return nil, fmt.Errorf("NewSliceTable: not a slice") - } - eltyp := reflectx.NonPointerType(npv.Type().Elem()) - if eltyp.Kind() != reflect.Struct { - return nil, fmt.Errorf("NewSliceTable: element type is not a struct") - } - dt := NewTable() - - for i := 0; i < eltyp.NumField(); i++ { - f := eltyp.Field(i) - switch f.Type.Kind() { - case reflect.Float32: - dt.AddFloat32Column(f.Name) - case reflect.Float64: - dt.AddFloat64Column(f.Name) - case reflect.String: - dt.AddStringColumn(f.Name) - } - } - - nr := npv.Len() - dt.SetNumRows(nr) - for ri := 0; ri < nr; ri++ { - for i := 0; i < eltyp.NumField(); i++ { - f := eltyp.Field(i) - switch f.Type.Kind() { - case reflect.Float32: - dt.SetFloat(f.Name, ri, float64(npv.Index(ri).Field(i).Interface().(float32))) - case reflect.Float64: - dt.SetFloat(f.Name, ri, float64(npv.Index(ri).Field(i).Interface().(float64))) - case reflect.String: - dt.SetString(f.Name, ri, npv.Index(ri).Field(i).Interface().(string)) - } - } - } - return dt, nil -} - -// UpdateSliceTable updates given Table with data from the given slice -// of structs, which must be the same type as used to configure the table -func UpdateSliceTable(st any, dt *Table) { - npv := reflectx.NonPointerValue(reflect.ValueOf(st)) - eltyp := reflectx.NonPointerType(npv.Type().Elem()) - - nr := npv.Len() - dt.SetNumRows(nr) - for ri := 0; ri < nr; ri++ { - for i := 0; i < eltyp.NumField(); i++ { - f := eltyp.Field(i) - switch f.Type.Kind() { - case reflect.Float32: - dt.SetFloat(f.Name, ri, float64(npv.Index(ri).Field(i).Interface().(float32))) - case reflect.Float64: - dt.SetFloat(f.Name, ri, float64(npv.Index(ri).Field(i).Interface().(float64))) - case reflect.String: - dt.SetString(f.Name, ri, npv.Index(ri).Field(i).Interface().(string)) - } - } - } -} diff --git a/tensor/table/slicetable_test.go b/tensor/table/slicetable_test.go deleted file mode 100644 index d74948342f..0000000000 --- a/tensor/table/slicetable_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package table - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type Data struct { - City string - Population float32 - Area float32 -} - -func TestSliceTable(t *testing.T) { - data := []Data{ - {"Davis", 62000, 500}, - {"Boulder", 85000, 800}, - } - - dt, err := NewSliceTable(data) - if err != nil { - t.Error(err.Error()) - } - assert.Equal(t, 2, dt.Rows) -} diff --git a/tensor/table/splits.go b/tensor/table/splits.go deleted file mode 100644 index faa8ec71c3..0000000000 --- a/tensor/table/splits.go +++ /dev/null @@ -1,505 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package table - -import ( - "fmt" - "slices" - "sort" - "strings" - - "cogentcore.org/core/base/errors" -) - -// SplitAgg contains aggregation results for splits -type SplitAgg struct { - - // the name of the aggregation operation performed, e.g., Sum, Mean, etc - Name string - - // column index on which the aggregation was performed -- results will have same shape as cells in this column - ColumnIndex int - - // aggregation results -- outer index is length of splits, inner is the length of the cell shape for the column - Aggs [][]float64 -} - -// Splits is a list of indexed views into a given Table, that represent a particular -// way of splitting up the data, e.g., whenever a given column value changes. -// -// It is functionally equivalent to the MultiIndex in python's pandas: it has multiple -// levels of indexes as listed in the Levels field, which then have corresponding -// Values for each split. These index levels can be re-ordered, and new Splits or -// IndexViews's can be created from subsets of the existing levels. The Values are -// stored simply as string values, as this is the most general type and often -// index values are labels etc. -// -// For Splits created by the splits.GroupBy function for example, each index Level is -// the column name that the data was grouped by, and the Values for each split are then -// the values of those columns. However, any arbitrary set of levels and values can -// be used, e.g., as in the splits.GroupByFunc function. -// -// Conceptually, a given Split always contains the full "outer product" of all the -// index levels -- there is one split for each unique combination of values along each -// index level. Thus, removing one level collapses across those values and moves the -// corresponding indexes into the remaining split indexes. -// -// You can Sort and Filter based on the index values directly, to reorganize the splits -// and drop particular index values, etc. -// -// Splits also maintains Aggs aggregate values for each split, which can be computed using -// standard aggregation methods over data columns, using the split.Agg* functions. -// -// The table code contains the structural methods for managing the Splits data. -// See split package for end-user methods to generate different kinds of splits, -// and perform aggregations, etc. -type Splits struct { - - // the list of index views for each split - Splits []*IndexView - - // levels of indexes used to organize the splits -- each split contains the full outer product across these index levels. for example, if the split was generated by grouping over column values, then these are the column names in order of grouping. the splits are not automatically sorted hierarchically by these levels but e.g., the GroupBy method produces that result -- use the Sort methods to explicitly sort. - Levels []string - - // the values of the index levels associated with each split. The outer dimension is the same length as Splits, and the inner dimension is the levels. - Values [][]string - - // aggregate results, one for each aggregation operation performed -- split-level data is contained within each SplitAgg struct -- deleting a split removes these aggs but adding new splits just invalidates all existing aggs (they are automatically deleted). 
- Aggs []*SplitAgg - - // current Less function used in sorting - lessFunc SplitsLessFunc `copier:"-" display:"-" xml:"-" json:"-"` -} - -// SplitsLessFunc is a function used for sort comparisons that returns -// true if split i is less than split j -type SplitsLessFunc func(spl *Splits, i, j int) bool - -// Len returns number of splits -func (spl *Splits) Len() int { - return len(spl.Splits) -} - -// Table returns the table from the first split (should be same for all) -// returns nil if no splits yet -func (spl *Splits) Table() *Table { - if len(spl.Splits) == 0 { - return nil - } - return spl.Splits[0].Table -} - -// New adds a new split to the list for given table, and with associated -// values, which are copied before saving into Values list, and any number of rows -// from the table associated with this split (also copied). -// Any existing Aggs are deleted by this. -func (spl *Splits) New(dt *Table, values []string, rows ...int) *IndexView { - spl.Aggs = nil - ix := &IndexView{Table: dt} - spl.Splits = append(spl.Splits, ix) - if len(rows) > 0 { - ix.Indexes = append(ix.Indexes, slices.Clone(rows)...) - } - if len(values) > 0 { - spl.Values = append(spl.Values, slices.Clone(values)) - } else { - spl.Values = append(spl.Values, nil) - } - return ix -} - -// ByValue finds split indexes by matching to split values, returns nil if not found. -// values are used in order as far as they go and any remaining values are assumed -// to match, and any empty values will match anything. Can use this to access different -// subgroups within overall set of splits. -func (spl *Splits) ByValue(values []string) []int { - var matches []int - for si, sn := range spl.Values { - sz := min(len(sn), len(values)) - match := true - for j := 0; j < sz; j++ { - if values[j] == "" { - continue - } - if values[j] != sn[j] { - match = false - break - } - } - if match { - matches = append(matches, si) - } - } - return matches -} - -// Delete deletes split at given index -- use this to coordinate deletion -// of Splits, Values, and Aggs values for given split -func (spl *Splits) Delete(idx int) { - spl.Splits = append(spl.Splits[:idx], spl.Splits[idx+1:]...) - spl.Values = append(spl.Values[:idx], spl.Values[idx+1:]...) - for _, ag := range spl.Aggs { - ag.Aggs = append(ag.Aggs[:idx], ag.Aggs[idx+1:]...) - } -} - -// Filter removes any split for which given function returns false -func (spl *Splits) Filter(fun func(idx int) bool) { - sz := len(spl.Splits) - for si := sz - 1; si >= 0; si-- { - if !fun(si) { - spl.Delete(si) - } - } -} - -// Sort sorts the splits according to the given Less function. 
-func (spl *Splits) Sort(lessFunc func(spl *Splits, i, j int) bool) { - spl.lessFunc = lessFunc - sort.Sort(spl) -} - -// SortLevels sorts the splits according to the current index level ordering of values -// i.e., first index level is outer sort dimension, then within that is the next, etc -func (spl *Splits) SortLevels() { - spl.Sort(func(sl *Splits, i, j int) bool { - vli := sl.Values[i] - vlj := sl.Values[j] - for k := range vli { - if vli[k] < vlj[k] { - return true - } else if vli[k] > vlj[k] { - return false - } // fallthrough - } - return false - }) -} - -// SortOrder sorts the splits according to the given ordering of index levels -// which can be a subset as well -func (spl *Splits) SortOrder(order []int) error { - if len(order) == 0 || len(order) > len(spl.Levels) { - return fmt.Errorf("table.Splits SortOrder: order length == 0 or > Levels") - } - spl.Sort(func(sl *Splits, i, j int) bool { - vli := sl.Values[i] - vlj := sl.Values[j] - for k := range order { - if vli[order[k]] < vlj[order[k]] { - return true - } else if vli[order[k]] > vlj[order[k]] { - return false - } // fallthrough - } - return false - }) - return nil -} - -// ReorderLevels re-orders the index levels according to the given new ordering indexes -// e.g., []int{1,0} will move the current level 0 to level 1, and 1 to level 0 -// no checking is done to ensure these are sensible beyond basic length test -- -// behavior undefined if so. Typically you want to call SortLevels after this. -func (spl *Splits) ReorderLevels(order []int) error { - nlev := len(spl.Levels) - if len(order) != nlev { - return fmt.Errorf("table.Splits ReorderLevels: order length != Levels") - } - old := make([]string, nlev) - copy(old, spl.Levels) - for i := range order { - spl.Levels[order[i]] = old[i] - } - for si := range spl.Values { - copy(old, spl.Values[si]) - for i := range order { - spl.Values[si][order[i]] = old[i] - } - } - return nil -} - -// ExtractLevels returns a new Splits that only has the given levels of indexes, -// in their given order, with the other levels removed and their corresponding indexes -// merged into the appropriate remaining levels. -// Any existing aggregation data is not retained in the new splits. -func (spl *Splits) ExtractLevels(levels []int) (*Splits, error) { - nlv := len(levels) - if nlv == 0 || nlv >= len(spl.Levels) { - return nil, fmt.Errorf("table.Splits ExtractLevels: levels length == 0 or >= Levels") - } - aggs := spl.Aggs - spl.Aggs = nil - ss := spl.Clone() - spl.Aggs = aggs - ss.SortOrder(levels) - // now just do the grouping by levels values - lstValues := make([]string, nlv) - curValues := make([]string, nlv) - var curIx *IndexView - nsp := len(ss.Splits) - for si := nsp - 1; si >= 0; si-- { - diff := false - for li := range levels { - vl := ss.Values[si][levels[li]] - curValues[li] = vl - if vl != lstValues[li] { - diff = true - } - } - if diff || curIx == nil { - curIx = ss.Splits[si] - copy(lstValues, curValues) - ss.Values[si] = slices.Clone(curValues) - } else { - curIx.Indexes = append(curIx.Indexes, ss.Splits[si].Indexes...) 
// absorb - ss.Delete(si) - } - } - ss.Levels = make([]string, nlv) - for li := range levels { - ss.Levels[li] = spl.Levels[levels[li]] - } - return ss, nil -} - -// Clone returns a cloned copy of our SplitAgg -func (sa *SplitAgg) Clone() *SplitAgg { - nsa := &SplitAgg{} - nsa.CopyFrom(sa) - return nsa -} - -// CopyFrom copies from other SplitAgg -- we get our own unique copy of everything -func (sa *SplitAgg) CopyFrom(osa *SplitAgg) { - sa.Name = osa.Name - sa.ColumnIndex = osa.ColumnIndex - nags := len(osa.Aggs) - if nags > 0 { - sa.Aggs = make([][]float64, nags) - for si := range osa.Aggs { - sa.Aggs[si] = slices.Clone(osa.Aggs[si]) - } - } -} - -// Clone returns a cloned copy of our splits -func (spl *Splits) Clone() *Splits { - nsp := &Splits{} - nsp.CopyFrom(spl) - return nsp -} - -// CopyFrom copies from other Splits -- we get our own unique copy of everything -func (spl *Splits) CopyFrom(osp *Splits) { - spl.Splits = make([]*IndexView, len(osp.Splits)) - spl.Values = make([][]string, len(osp.Values)) - for si := range osp.Splits { - spl.Splits[si] = osp.Splits[si].Clone() - spl.Values[si] = slices.Clone(osp.Values[si]) - } - spl.Levels = slices.Clone(osp.Levels) - - nag := len(osp.Aggs) - if nag > 0 { - spl.Aggs = make([]*SplitAgg, nag) - for ai := range osp.Aggs { - spl.Aggs[ai] = osp.Aggs[ai].Clone() - } - } -} - -// AddAgg adds a new set of aggregation results for the Splits -func (spl *Splits) AddAgg(name string, colIndex int) *SplitAgg { - ag := &SplitAgg{Name: name, ColumnIndex: colIndex} - spl.Aggs = append(spl.Aggs, ag) - return ag -} - -// DeleteAggs deletes all existing aggregation data -func (spl *Splits) DeleteAggs() { - spl.Aggs = nil -} - -// AggByName returns Agg results for given name, which does NOT include the -// column name, just the name given to the Agg result -// (e.g., Mean for a standard Mean agg). -// Returns error message if not found. -func (spl *Splits) AggByName(name string) (*SplitAgg, error) { - for _, ag := range spl.Aggs { - if ag.Name == name { - return ag, nil - } - } - return nil, fmt.Errorf("table.Splits AggByName: agg results named: %v not found", name) -} - -// AggByColumnName returns Agg results for given column name, -// optionally including :Name agg name appended, where Name -// is the name given to the Agg result (e.g., Mean for a standard Mean agg). -// Returns error message if not found. -func (spl *Splits) AggByColumnName(name string) (*SplitAgg, error) { - dt := spl.Table() - if dt == nil { - return nil, fmt.Errorf("table.Splits AggByColumnName: table nil") - } - nmsp := strings.Split(name, ":") - colIndex, err := dt.ColumnIndex(nmsp[0]) - if err != nil { - return nil, err - } - for _, ag := range spl.Aggs { - if ag.ColumnIndex != colIndex { - continue - } - if len(nmsp) == 2 && nmsp[1] != ag.Name { - continue - } - return ag, nil - } - return nil, fmt.Errorf("table.Splits AggByColumnName: agg results named: %v not found", name) -} - -// SetLevels sets the Levels index names -- must match actual index dimensionality -// of the Values. This is automatically done by e.g., GroupBy, but must be done -// manually if creating custom indexes. 
-func (spl *Splits) SetLevels(levels ...string) { - spl.Levels = levels -} - -// use these for arg to ArgsToTable* -const ( - // ColumnNameOnly means resulting agg table just has the original column name, no aggregation name - ColumnNameOnly bool = true - // AddAggName means resulting agg table columns have aggregation name appended - AddAggName = false -) - -// AggsToTable returns a Table containing this Splits' aggregate data. -// Must have Levels and Aggs all created as in the split.Agg* methods. -// if colName == ColumnNameOnly, then the name of the columns for the Table -// is just the corresponding agg column name -- otherwise it also includes -// the name of the aggregation function with a : divider (e.g., Name:Mean) -func (spl *Splits) AggsToTable(colName bool) *Table { - nsp := len(spl.Splits) - if nsp == 0 { - return nil - } - dt := spl.Splits[0].Table - st := NewTable().SetNumRows(nsp) - for _, cn := range spl.Levels { - oc, _ := dt.ColumnByName(cn) - if oc != nil { - st.AddColumnOfType(oc.DataType(), cn) - } else { - st.AddStringColumn(cn) - } - } - for _, ag := range spl.Aggs { - col := dt.Columns[ag.ColumnIndex] - an := dt.ColumnNames[ag.ColumnIndex] - if colName == AddAggName { - an += ":" + ag.Name - } - st.AddFloat64TensorColumn(an, col.Shape().Sizes[1:], col.Shape().Names[1:]...) - } - for si := range spl.Splits { - cidx := 0 - for ci := range spl.Levels { - col := st.Columns[cidx] - col.SetString1D(si, spl.Values[si][ci]) - cidx++ - } - for _, ag := range spl.Aggs { - col := st.Columns[cidx] - _, csz := col.RowCellSize() - sti := si * csz - av := ag.Aggs[si] - for j, a := range av { - col.SetFloat1D(sti+j, a) - } - cidx++ - } - } - return st -} - -// AggsToTableCopy returns a Table containing this Splits' aggregate data -// and a copy of the first row of data for each split for all non-agg cols, -// which is useful for recording other data that goes along with aggregated values. -// Must have Levels and Aggs all created as in the split.Agg* methods. -// if colName == ColumnNameOnly, then the name of the columns for the Table -// is just the corresponding agg column name -- otherwise it also includes -// the name of the aggregation function with a : divider (e.g., Name:Mean) -func (spl *Splits) AggsToTableCopy(colName bool) *Table { - nsp := len(spl.Splits) - if nsp == 0 { - return nil - } - dt := spl.Splits[0].Table - st := NewTable().SetNumRows(nsp) - exmap := make(map[string]struct{}) - for _, cn := range spl.Levels { - st.AddStringColumn(cn) - exmap[cn] = struct{}{} - } - for _, ag := range spl.Aggs { - col := dt.Columns[ag.ColumnIndex] - an := dt.ColumnNames[ag.ColumnIndex] - exmap[an] = struct{}{} - if colName == AddAggName { - an += ":" + ag.Name - } - st.AddFloat64TensorColumn(an, col.Shape().Sizes[1:], col.Shape().Names[1:]...) 
- } - var cpcol []string - for _, cn := range dt.ColumnNames { - if _, ok := exmap[cn]; !ok { - cpcol = append(cpcol, cn) - col := errors.Log1(dt.ColumnByName(cn)) - st.AddColumn(col.Clone(), cn) - } - } - for si, sidx := range spl.Splits { - cidx := 0 - for ci := range spl.Levels { - col := st.Columns[cidx] - col.SetString1D(si, spl.Values[si][ci]) - cidx++ - } - for _, ag := range spl.Aggs { - col := st.Columns[cidx] - _, csz := col.RowCellSize() - sti := si * csz - av := ag.Aggs[si] - for j, a := range av { - col.SetFloat1D(sti+j, a) - } - cidx++ - } - if len(sidx.Indexes) > 0 { - stidx := sidx.Indexes[0] - for _, cn := range cpcol { - st.CopyCell(cn, si, dt, cn, stidx) - } - } - } - return st -} - -// Less calls the LessFunc for sorting -func (spl *Splits) Less(i, j int) bool { - return spl.lessFunc(spl, i, j) -} - -// Swap switches the indexes for i and j -func (spl *Splits) Swap(i, j int) { - spl.Splits[i], spl.Splits[j] = spl.Splits[j], spl.Splits[i] - spl.Values[i], spl.Values[j] = spl.Values[j], spl.Values[i] - for _, ag := range spl.Aggs { - ag.Aggs[i], ag.Aggs[j] = ag.Aggs[j], ag.Aggs[i] - } -} diff --git a/tensor/table/table.go b/tensor/table/table.go deleted file mode 100644 index 42c507b0ae..0000000000 --- a/tensor/table/table.go +++ /dev/null @@ -1,709 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package table - -//go:generate core generate - -import ( - "errors" - "fmt" - "log/slog" - "math" - "reflect" - "slices" - "strings" - - "cogentcore.org/core/tensor" -) - -// Table is a table of data, with columns of tensors, -// each with the same number of Rows (outer-most dimension). -type Table struct { //types:add - - // columns of data, as tensor.Tensor tensors - Columns []tensor.Tensor `display:"no-inline"` - - // the names of the columns - ColumnNames []string - - // number of rows, which is enforced to be the size of the outer-most dimension of the column tensors - Rows int `edit:"-"` - - // the map of column names to column numbers - ColumnNameMap map[string]int `display:"-"` - - // misc meta data for the table. We use lower-case key names following the struct tag convention: name = name of table; desc = description; read-only = gui is read-only; precision = n for precision to write out floats in csv. For Column-specific data, we look for ColumnName: prefix, specifically ColumnName:desc = description of the column contents, which is shown as tooltip in the tensorcore.Table, and :width for width of a column - MetaData map[string]string -} - -func NewTable(name ...string) *Table { - dt := &Table{} - if len(name) > 0 { - dt.SetMetaData("name", name[0]) - } - return dt -} - -// IsValidRow returns error if the row is invalid -func (dt *Table) IsValidRow(row int) error { - if row < 0 || row >= dt.Rows { - return fmt.Errorf("table.Table IsValidRow: row %d is out of valid range [0..%d]", row, dt.Rows) - } - return nil -} - -// NumRows returns the number of rows -func (dt *Table) NumRows() int { return dt.Rows } - -// NumColumns returns the number of columns -func (dt *Table) NumColumns() int { return len(dt.Columns) } - -// Column returns the tensor at given column index -func (dt *Table) Column(i int) tensor.Tensor { return dt.Columns[i] } - -// ColumnByName returns the tensor at given column name, with error message if not found. 
-// Returns nil if not found -func (dt *Table) ColumnByName(name string) (tensor.Tensor, error) { - i, ok := dt.ColumnNameMap[name] - if !ok { - return nil, fmt.Errorf("table.Table ColumnByNameTry: column named: %v not found", name) - } - return dt.Columns[i], nil -} - -// ColumnIndex returns the index of the given column name, -// along with an error if not found. -func (dt *Table) ColumnIndex(name string) (int, error) { - i, ok := dt.ColumnNameMap[name] - if !ok { - return 0, fmt.Errorf("table.Table ColumnIndex: column named: %v not found", name) - } - return i, nil -} - -// ColumnIndexesByNames returns the indexes of the given column names. -// idxs have -1 if name not found. -func (dt *Table) ColumnIndexesByNames(names ...string) ([]int, error) { - nc := len(names) - if nc == 0 { - return nil, nil - } - var errs []error - cidx := make([]int, nc) - for i, cn := range names { - var err error - cidx[i], err = dt.ColumnIndex(cn) - if err != nil { - errs = append(errs, err) - } - } - return cidx, errors.Join(errs...) -} - -// ColumnName returns the name of given column -func (dt *Table) ColumnName(i int) string { - return dt.ColumnNames[i] -} - -// UpdateColumnNameMap updates the column name map, returning an error -// if any of the column names are duplicates. -func (dt *Table) UpdateColumnNameMap() error { - nc := dt.NumColumns() - dt.ColumnNameMap = make(map[string]int, nc) - var errs []error - for i, nm := range dt.ColumnNames { - if _, has := dt.ColumnNameMap[nm]; has { - err := fmt.Errorf("table.Table duplicate column name: %s", nm) - slog.Warn(err.Error()) - errs = append(errs, err) - } else { - dt.ColumnNameMap[nm] = i - } - } - if len(errs) > 0 { - return errors.Join(errs...) - } - return nil -} - -// AddColumn adds a new column to the table, of given type and column name -// (which must be unique). The cells of this column hold a single scalar value: -// see AddColumnTensor for n-dimensional cells. -func AddColumn[T string | bool | float32 | float64 | int | int32 | byte](dt *Table, name string) tensor.Tensor { - rows := max(1, dt.Rows) - tsr := tensor.New[T]([]int{rows}, "Row") - dt.AddColumn(tsr, name) - return tsr -} - -// InsertColumn inserts a new column to the table, of given type and column name -// (which must be unique), at given index. -// The cells of this column hold a single scalar value. -func InsertColumn[T string | bool | float32 | float64 | int | int32 | byte](dt *Table, name string, idx int) tensor.Tensor { - rows := max(1, dt.Rows) - tsr := tensor.New[T]([]int{rows}, "Row") - dt.InsertColumn(tsr, name, idx) - return tsr -} - -// AddTensorColumn adds a new n-dimensional column to the table, of given type, column name -// (which must be unique), and dimensionality of each _cell_. -// An outer-most Row dimension will be added to this dimensionality to create -// the tensor column. -func AddTensorColumn[T string | bool | float32 | float64 | int | int32 | byte](dt *Table, name string, cellSizes []int, dimNames ...string) tensor.Tensor { - rows := max(1, dt.Rows) - sz := append([]int{rows}, cellSizes...) - nms := append([]string{"Row"}, dimNames...) - tsr := tensor.New[T](sz, nms...) - dt.AddColumn(tsr, name) - return tsr -} - -// AddColumn adds the given tensor as a column to the table, -// returning an error and not adding if the name is not unique. -// Automatically adjusts the shape to fit the current number of rows. 
-func (dt *Table) AddColumn(tsr tensor.Tensor, name string) error { - dt.ColumnNames = append(dt.ColumnNames, name) - err := dt.UpdateColumnNameMap() - if err != nil { - dt.ColumnNames = dt.ColumnNames[:len(dt.ColumnNames)-1] - return err - } - dt.Columns = append(dt.Columns, tsr) - rows := max(1, dt.Rows) - tsr.SetNumRows(rows) - return nil -} - -// InsertColumn inserts the given tensor as a column to the table at given index, -// returning an error and not adding if the name is not unique. -// Automatically adjusts the shape to fit the current number of rows. -func (dt *Table) InsertColumn(tsr tensor.Tensor, name string, idx int) error { - if _, has := dt.ColumnNameMap[name]; has { - err := fmt.Errorf("table.Table duplicate column name: %s", name) - slog.Warn(err.Error()) - return err - } - dt.ColumnNames = slices.Insert(dt.ColumnNames, idx, name) - dt.UpdateColumnNameMap() - dt.Columns = slices.Insert(dt.Columns, idx, tsr) - rows := max(1, dt.Rows) - tsr.SetNumRows(rows) - return nil -} - -// AddColumnOfType adds a new scalar column to the table, of given reflect type, -// column name (which must be unique), -// The cells of this column hold a single (scalar) value of given type. -// Supported types are string, bool (for [tensor.Bits]), float32, float64, int, int32, and byte. -func (dt *Table) AddColumnOfType(typ reflect.Kind, name string) tensor.Tensor { - rows := max(1, dt.Rows) - tsr := tensor.NewOfType(typ, []int{rows}, "Row") - dt.AddColumn(tsr, name) - return tsr -} - -// AddTensorColumnOfType adds a new n-dimensional column to the table, of given reflect type, -// column name (which must be unique), and dimensionality of each _cell_. -// An outer-most Row dimension will be added to this dimensionality to create -// the tensor column. -// Supported types are string, bool (for [tensor.Bits]), float32, float64, int, int32, and byte. -func (dt *Table) AddTensorColumnOfType(typ reflect.Kind, name string, cellSizes []int, dimNames ...string) tensor.Tensor { - rows := max(1, dt.Rows) - sz := append([]int{rows}, cellSizes...) - nms := append([]string{"Row"}, dimNames...) - tsr := tensor.NewOfType(typ, sz, nms...) - dt.AddColumn(tsr, name) - return tsr -} - -// AddStringColumn adds a new String column with given name. -// The cells of this column hold a single string value. -func (dt *Table) AddStringColumn(name string) *tensor.String { - return AddColumn[string](dt, name).(*tensor.String) -} - -// AddFloat64Column adds a new float64 column with given name. -// The cells of this column hold a single scalar value. -func (dt *Table) AddFloat64Column(name string) *tensor.Float64 { - return AddColumn[float64](dt, name).(*tensor.Float64) -} - -// AddFloat64TensorColumn adds a new n-dimensional float64 column with given name -// and dimensionality of each _cell_. -// An outer-most Row dimension will be added to this dimensionality to create -// the tensor column. -func (dt *Table) AddFloat64TensorColumn(name string, cellSizes []int, dimNames ...string) *tensor.Float64 { - return AddTensorColumn[float64](dt, name, cellSizes, dimNames...).(*tensor.Float64) -} - -// AddFloat32Column adds a new float32 column with given name. -// The cells of this column hold a single scalar value. -func (dt *Table) AddFloat32Column(name string) *tensor.Float32 { - return AddColumn[float32](dt, name).(*tensor.Float32) -} - -// AddFloat32TensorColumn adds a new n-dimensional float32 column with given name -// and dimensionality of each _cell_. 
-// An outer-most Row dimension will be added to this dimensionality to create -// the tensor column. -func (dt *Table) AddFloat32TensorColumn(name string, cellSizes []int, dimNames ...string) *tensor.Float32 { - return AddTensorColumn[float32](dt, name, cellSizes, dimNames...).(*tensor.Float32) -} - -// AddIntColumn adds a new int column with given name. -// The cells of this column hold a single scalar value. -func (dt *Table) AddIntColumn(name string) *tensor.Int { - return AddColumn[int](dt, name).(*tensor.Int) -} - -// AddIntTensorColumn adds a new n-dimensional int column with given name -// and dimensionality of each _cell_. -// An outer-most Row dimension will be added to this dimensionality to create -// the tensor column. -func (dt *Table) AddIntTensorColumn(name string, cellSizes []int, dimNames ...string) *tensor.Int { - return AddTensorColumn[int](dt, name, cellSizes, dimNames...).(*tensor.Int) -} - -// DeleteColumnName deletes column of given name. -// returns error if not found. -func (dt *Table) DeleteColumnName(name string) error { - ci, err := dt.ColumnIndex(name) - if err != nil { - return err - } - dt.DeleteColumnIndex(ci) - return nil -} - -// DeleteColumnIndex deletes column of given index -func (dt *Table) DeleteColumnIndex(idx int) { - dt.Columns = append(dt.Columns[:idx], dt.Columns[idx+1:]...) - dt.ColumnNames = append(dt.ColumnNames[:idx], dt.ColumnNames[idx+1:]...) - dt.UpdateColumnNameMap() -} - -// DeleteAll deletes all columns -- full reset -func (dt *Table) DeleteAll() { - dt.Columns = nil - dt.ColumnNames = nil - dt.Rows = 0 - dt.ColumnNameMap = nil -} - -// AddRows adds n rows to each of the columns -func (dt *Table) AddRows(n int) { //types:add - dt.SetNumRows(dt.Rows + n) -} - -// SetNumRows sets the number of rows in the table, across all columns -// if rows = 0 then effective number of rows in tensors is 1, as this dim cannot be 0 -func (dt *Table) SetNumRows(rows int) *Table { //types:add - dt.Rows = rows // can be 0 - rows = max(1, rows) - for _, tsr := range dt.Columns { - tsr.SetNumRows(rows) - } - return dt -} - -// note: no really clean definition of CopyFrom -- no point of re-using existing -// table -- just clone it. - -// Clone returns a complete copy of this table -func (dt *Table) Clone() *Table { - cp := NewTable().SetNumRows(dt.Rows) - cp.CopyMetaDataFrom(dt) - for i, cl := range dt.Columns { - cp.AddColumn(cl.Clone(), dt.ColumnNames[i]) - } - return cp -} - -// AppendRows appends shared columns in both tables with input table rows -func (dt *Table) AppendRows(dt2 *Table) { - shared := false - strow := dt.Rows - for iCol := range dt.Columns { - colName := dt.ColumnName(iCol) - _, err := dt2.ColumnIndex(colName) - if err != nil { - continue - } - if !shared { - shared = true - dt.AddRows(dt2.Rows) - } - for iRow := 0; iRow < dt2.Rows; iRow++ { - dt.CopyCell(colName, iRow+strow, dt2, colName, iRow) - } - } -} - -// SetMetaData sets given meta-data key to given value, safely creating the -// map if not yet initialized. 
Standard Keys are: -// * name -- name of table -// * desc -- description of table -// * read-only -- makes gui read-only (inactive edits) for tensorcore.Table -// * ColumnName:* -- prefix for all column-specific meta-data -// - desc -- description of column -func (dt *Table) SetMetaData(key, val string) { - if dt.MetaData == nil { - dt.MetaData = make(map[string]string) - } - dt.MetaData[key] = val -} - -// CopyMetaDataFrom copies meta data from other table -func (dt *Table) CopyMetaDataFrom(cp *Table) { - nm := len(cp.MetaData) - if nm == 0 { - return - } - if dt.MetaData == nil { - dt.MetaData = make(map[string]string, nm) - } - for k, v := range cp.MetaData { - dt.MetaData[k] = v - } -} - -// Named arg values for Contains, IgnoreCase -const ( - // Contains means the string only needs to contain the target string (see Equals) - Contains bool = true - // Equals means the string must equal the target string (see Contains) - Equals = false - // IgnoreCase means that differences in case are ignored in comparing strings - IgnoreCase = true - // UseCase means that case matters when comparing strings - UseCase = false -) - -// RowsByStringIndex returns the list of rows that have given -// string value in given column index. -// if contains, only checks if row contains string; if ignoreCase, ignores case. -// Use named args for greater clarity. -func (dt *Table) RowsByStringIndex(column int, str string, contains, ignoreCase bool) []int { - col := dt.Columns[column] - lowstr := strings.ToLower(str) - var idxs []int - for i := 0; i < dt.Rows; i++ { - val := col.String1D(i) - has := false - switch { - case contains && ignoreCase: - has = strings.Contains(strings.ToLower(val), lowstr) - case contains: - has = strings.Contains(val, str) - case ignoreCase: - has = strings.EqualFold(val, str) - default: - has = (val == str) - } - if has { - idxs = append(idxs, i) - } - } - return idxs -} - -// RowsByString returns the list of rows that have given -// string value in given column name. returns nil & error if name invalid. -// if contains, only checks if row contains string; if ignoreCase, ignores case. -// Use named args for greater clarity. -func (dt *Table) RowsByString(column string, str string, contains, ignoreCase bool) ([]int, error) { - ci, err := dt.ColumnIndex(column) - if err != nil { - return nil, err - } - return dt.RowsByStringIndex(ci, str, contains, ignoreCase), nil -} - -////////////////////////////////////////////////////////////////////////////////////// -// Cell convenience access methods - -// FloatIndex returns the float64 value of cell at given column, row index -// for columns that have 1-dimensional tensors. -// Returns NaN if column is not a 1-dimensional tensor or row not valid. -func (dt *Table) FloatIndex(column, row int) float64 { - if dt.IsValidRow(row) != nil { - return math.NaN() - } - ct := dt.Columns[column] - if ct.NumDims() != 1 { - return math.NaN() - } - return ct.Float1D(row) -} - -// Float returns the float64 value of cell at given column (by name), -// row index for columns that have 1-dimensional tensors. -// Returns NaN if column is not a 1-dimensional tensor -// or col name not found, or row not valid. 
-func (dt *Table) Float(column string, row int) float64 { - if dt.IsValidRow(row) != nil { - return math.NaN() - } - ct, err := dt.ColumnByName(column) - if err != nil { - return math.NaN() - } - if ct.NumDims() != 1 { - return math.NaN() - } - return ct.Float1D(row) -} - -// StringIndex returns the string value of cell at given column, row index -// for columns that have 1-dimensional tensors. -// Returns "" if column is not a 1-dimensional tensor or row not valid. -func (dt *Table) StringIndex(column, row int) string { - if dt.IsValidRow(row) != nil { - return "" - } - ct := dt.Columns[column] - if ct.NumDims() != 1 { - return "" - } - return ct.String1D(row) -} - -// NOTE: String conflicts with [fmt.Stringer], so we have to use StringValue - -// StringValue returns the string value of cell at given column (by name), row index -// for columns that have 1-dimensional tensors. -// Returns "" if column is not a 1-dimensional tensor or row not valid. -func (dt *Table) StringValue(column string, row int) string { - if dt.IsValidRow(row) != nil { - return "" - } - ct, err := dt.ColumnByName(column) - if err != nil { - return "" - } - if ct.NumDims() != 1 { - return "" - } - return ct.String1D(row) -} - -// TensorIndex returns the tensor SubSpace for given column, row index -// for columns that have higher-dimensional tensors so each row is -// represented by an n-1 dimensional tensor, with the outer dimension -// being the row number. Returns nil if column is a 1-dimensional -// tensor or there is any error from the tensor.Tensor.SubSpace call. -func (dt *Table) TensorIndex(column, row int) tensor.Tensor { - if dt.IsValidRow(row) != nil { - return nil - } - ct := dt.Columns[column] - if ct.NumDims() == 1 { - return nil - } - return ct.SubSpace([]int{row}) -} - -// Tensor returns the tensor SubSpace for given column (by name), row index -// for columns that have higher-dimensional tensors so each row is -// represented by an n-1 dimensional tensor, with the outer dimension -// being the row number. Returns nil on any error. -func (dt *Table) Tensor(column string, row int) tensor.Tensor { - if dt.IsValidRow(row) != nil { - return nil - } - ct, err := dt.ColumnByName(column) - if err != nil { - return nil - } - if ct.NumDims() == 1 { - return nil - } - return ct.SubSpace([]int{row}) -} - -// TensorFloat1D returns the float value of a Tensor cell's cell at given -// 1D offset within cell, for given column (by name), row index -// for columns that have higher-dimensional tensors so each row is -// represented by an n-1 dimensional tensor, with the outer dimension -// being the row number. Returns 0 on any error. -func (dt *Table) TensorFloat1D(column string, row int, idx int) float64 { - if dt.IsValidRow(row) != nil { - return math.NaN() - } - ct, err := dt.ColumnByName(column) - if err != nil { - return math.NaN() - } - if ct.NumDims() == 1 { - return math.NaN() - } - _, sz := ct.RowCellSize() - if idx >= sz || idx < 0 { - return math.NaN() - } - off := row*sz + idx - return ct.Float1D(off) -} - -///////////////////////////////////////////////////////////////////////////////////// -// Set - -// SetFloatIndex sets the float64 value of cell at given column, row index -// for columns that have 1-dimensional tensors. 
-func (dt *Table) SetFloatIndex(column, row int, val float64) error { - if err := dt.IsValidRow(row); err != nil { - return err - } - ct := dt.Columns[column] - if ct.NumDims() != 1 { - return fmt.Errorf("table.Table SetFloatIndex: Column %d is a tensor, must use SetTensorFloat1D", column) - } - ct.SetFloat1D(row, val) - return nil -} - -// SetFloat sets the float64 value of cell at given column (by name), row index -// for columns that have 1-dimensional tensors. -func (dt *Table) SetFloat(column string, row int, val float64) error { - if err := dt.IsValidRow(row); err != nil { - return err - } - ct, err := dt.ColumnByName(column) - if err != nil { - return err - } - if ct.NumDims() != 1 { - return fmt.Errorf("table.Table SetFloat: Column %s is a tensor, must use SetTensorFloat1D", column) - } - ct.SetFloat1D(row, val) - return nil -} - -// SetStringIndex sets the string value of cell at given column, row index -// for columns that have 1-dimensional tensors. Returns true if set. -func (dt *Table) SetStringIndex(column, row int, val string) error { - if err := dt.IsValidRow(row); err != nil { - return err - } - ct := dt.Columns[column] - if ct.NumDims() != 1 { - return fmt.Errorf("table.Table SetStringIndex: Column %d is a tensor, must use SetTensorFloat1D", column) - } - ct.SetString1D(row, val) - return nil -} - -// SetString sets the string value of cell at given column (by name), row index -// for columns that have 1-dimensional tensors. Returns true if set. -func (dt *Table) SetString(column string, row int, val string) error { - if err := dt.IsValidRow(row); err != nil { - return err - } - ct, err := dt.ColumnByName(column) - if err != nil { - return err - } - if ct.NumDims() != 1 { - return fmt.Errorf("table.Table SetString: Column %s is a tensor, must use SetTensorFloat1D", column) - } - ct.SetString1D(row, val) - return nil -} - -// SetTensorIndex sets the tensor value of cell at given column, row index -// for columns that have n-dimensional tensors. Returns true if set. -func (dt *Table) SetTensorIndex(column, row int, val tensor.Tensor) error { - if err := dt.IsValidRow(row); err != nil { - return err - } - ct := dt.Columns[column] - _, csz := ct.RowCellSize() - st := row * csz - sz := min(csz, val.Len()) - if ct.IsString() { - for j := 0; j < sz; j++ { - ct.SetString1D(st+j, val.String1D(j)) - } - } else { - for j := 0; j < sz; j++ { - ct.SetFloat1D(st+j, val.Float1D(j)) - } - } - return nil -} - -// SetTensor sets the tensor value of cell at given column (by name), row index -// for columns that have n-dimensional tensors. Returns true if set. -func (dt *Table) SetTensor(column string, row int, val tensor.Tensor) error { - if err := dt.IsValidRow(row); err != nil { - return err - } - ci, err := dt.ColumnIndex(column) - if err != nil { - return err - } - return dt.SetTensorIndex(ci, row, val) -} - -// SetTensorFloat1D sets the tensor cell's float cell value at given 1D index within cell, -// at given column (by name), row index for columns that have n-dimensional tensors. -// Returns true if set. 
-func (dt *Table) SetTensorFloat1D(column string, row int, idx int, val float64) error { - if err := dt.IsValidRow(row); err != nil { - return err - } - ct, err := dt.ColumnByName(column) - if err != nil { - return err - } - _, sz := ct.RowCellSize() - if idx >= sz || idx < 0 { - return fmt.Errorf("table.Table IsValidRow: index %d is out of valid range [0..%d]", idx, sz) - } - off := row*sz + idx - ct.SetFloat1D(off, val) - return nil -} - -////////////////////////////////////////////////////////////////////////////////////// -// Copy Cell - -// CopyCell copies into cell at given column, row from cell in other table. -// It is robust to differences in type; uses destination cell type. -// Returns error if column names are invalid. -func (dt *Table) CopyCell(column string, row int, cpt *Table, cpColNm string, cpRow int) error { - ct, err := dt.ColumnByName(column) - if err != nil { - return err - } - cpct, err := cpt.ColumnByName(cpColNm) - if err != nil { - return err - } - _, sz := ct.RowCellSize() - if sz == 1 { - if ct.IsString() { - ct.SetString1D(row, cpct.String1D(cpRow)) - return nil - } - ct.SetFloat1D(row, cpct.Float1D(cpRow)) - return nil - } - _, cpsz := cpct.RowCellSize() - st := row * sz - cst := cpRow * cpsz - msz := min(sz, cpsz) - if ct.IsString() { - for j := 0; j < msz; j++ { - ct.SetString1D(st+j, cpct.String1D(cst+j)) - } - } else { - for j := 0; j < msz; j++ { - ct.SetFloat1D(st+j, cpct.Float1D(cst+j)) - } - } - return nil -} diff --git a/tensor/table/table_test.go b/tensor/table/table_test.go deleted file mode 100644 index dbf34cdc43..0000000000 --- a/tensor/table/table_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
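The deleted `table.go` above defined the full column/row API for `table.Table`; the test file whose header begins here exercised it. As a quick orientation for reviewers, here is a minimal sketch of how that API was used before this removal. Method names are taken directly from the deleted code above; the import path assumes the pre-removal package location, which this change deletes, so it may not resolve after the change lands.

```Go
package main

import (
	"fmt"

	"cogentcore.org/core/tensor/table"
)

func main() {
	// build a small table with one string and one float64 column
	dt := table.NewTable("demo")
	dt.AddStringColumn("Name")
	dt.AddFloat64Column("Score")
	dt.SetNumRows(2)

	// set cell values by column name and row index
	dt.SetString("Name", 0, "alpha")
	dt.SetFloat("Score", 0, 3.14)
	dt.SetString("Name", 1, "beta")
	dt.SetFloat("Score", 1, 2.17)

	// read values back; invalid rows return "" / NaN rather than panicking
	fmt.Println(dt.StringValue("Name", 1), dt.Float("Score", 1))
}
```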
- -package table - -import ( - "strconv" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAdd3DCol(t *testing.T) { - dt := NewTable() - dt.AddFloat32TensorColumn("Values", []int{11, 1, 16}) - - col, err := dt.ColumnByName("Values") - if err != nil { - t.Error(err) - } - if col.NumDims() != 4 { - t.Errorf("Add4DCol: # of dims != 4\n") - } - - if col.Shape().DimSize(0) != 1 { - t.Errorf("Add4DCol: dim 0 len != 1, was: %v\n", col.Shape().DimSize(0)) - } - - if col.Shape().DimSize(1) != 11 { - t.Errorf("Add4DCol: dim 0 len != 11, was: %v\n", col.Shape().DimSize(1)) - } - - if col.Shape().DimSize(2) != 1 { - t.Errorf("Add4DCol: dim 0 len != 1, was: %v\n", col.Shape().DimSize(2)) - } - - if col.Shape().DimSize(3) != 16 { - t.Errorf("Add4DCol: dim 0 len != 16, was: %v\n", col.Shape().DimSize(3)) - } -} - -func NewTestTable() *Table { - dt := NewTable() - dt.AddStringColumn("Str") - dt.AddFloat64Column("Flt64") - dt.AddIntColumn("Int") - dt.SetNumRows(3) - for i := 0; i < dt.Rows; i++ { - dt.SetString("Str", i, strconv.Itoa(i)) - dt.SetFloat("Flt64", i, float64(i)) - dt.SetFloat("Int", i, float64(i)) - } - return dt -} - -func TestAppendRows(t *testing.T) { - st := NewTestTable() - dt := NewTestTable() - dt.AppendRows(st) - dt.AppendRows(st) - dt.AppendRows(st) - for j := 0; j < 3; j++ { - for i := 0; i < st.Rows; i++ { - sr := j*3 + i - ss := st.StringValue("Str", i) - ds := dt.StringValue("Str", sr) - assert.Equal(t, ss, ds) - - sf := st.Float("Flt64", i) - df := dt.Float("Flt64", sr) - assert.Equal(t, sf, df) - - sf = st.Float("Int", i) - df = dt.Float("Int", sr) - assert.Equal(t, sf, df) - } - } -} diff --git a/tensor/table/testdata/emer_simple_lines_5x5.dat b/tensor/table/testdata/emer_simple_lines_5x5.dat deleted file mode 100644 index 407c87e418..0000000000 --- a/tensor/table/testdata/emer_simple_lines_5x5.dat +++ /dev/null @@ -1,8 +0,0 @@ -_H: $Name %Input[2:0,0]<2:5,5> %Input[2:1,0] %Input[2:2,0] %Input[2:3,0] %Input[2:4,0] %Input[2:0,1] %Input[2:1,1] %Input[2:2,1] %Input[2:3,1] %Input[2:4,1] %Input[2:0,2] %Input[2:1,2] %Input[2:2,2] %Input[2:3,2] %Input[2:4,2] %Input[2:0,3] %Input[2:1,3] %Input[2:2,3] %Input[2:3,3] %Input[2:4,3] %Input[2:0,4] %Input[2:1,4] %Input[2:2,4] %Input[2:3,4] %Input[2:4,4] %Output[2:0,0]<2:5,5> %Output[2:1,0] %Output[2:2,0] %Output[2:3,0] %Output[2:4,0] %Output[2:0,1] %Output[2:1,1] %Output[2:2,1] %Output[2:3,1] %Output[2:4,1] %Output[2:0,2] %Output[2:1,2] %Output[2:2,2] %Output[2:3,2] %Output[2:4,2] %Output[2:0,3] %Output[2:1,3] %Output[2:2,3] %Output[2:3,3] %Output[2:4,3] %Output[2:0,4] %Output[2:1,4] %Output[2:2,4] %Output[2:3,4] %Output[2:4,4] -_D: "checks ""and balances""" 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 -_D: "mitvert\nand fitvert" 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 -_D: "midhoriz" 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 -_D: "rdiag" 0 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 -_D: "ldiag" 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 -_D: "random" 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0 1 1 0 0 0 0 0 0 0 0 0 - diff --git a/tensor/table/testdata/emer_simple_lines_5x5_rec.dat b/tensor/table/testdata/emer_simple_lines_5x5_rec.dat deleted file mode 100644 index dd4d67baf4..0000000000 --- 
a/tensor/table/testdata/emer_simple_lines_5x5_rec.dat +++ /dev/null @@ -1,7 +0,0 @@ -$Name %Input[2:0,0]<2:5,5> %Input[2:0,1] %Input[2:0,2] %Input[2:0,3] %Input[2:0,4] %Input[2:1,0] %Input[2:1,1] %Input[2:1,2] %Input[2:1,3] %Input[2:1,4] %Input[2:2,0] %Input[2:2,1] %Input[2:2,2] %Input[2:2,3] %Input[2:2,4] %Input[2:3,0] %Input[2:3,1] %Input[2:3,2] %Input[2:3,3] %Input[2:3,4] %Input[2:4,0] %Input[2:4,1] %Input[2:4,2] %Input[2:4,3] %Input[2:4,4] %Output[2:0,0]<2:5,5> %Output[2:0,1] %Output[2:0,2] %Output[2:0,3] %Output[2:0,4] %Output[2:1,0] %Output[2:1,1] %Output[2:1,2] %Output[2:1,3] %Output[2:1,4] %Output[2:2,0] %Output[2:2,1] %Output[2:2,2] %Output[2:2,3] %Output[2:2,4] %Output[2:3,0] %Output[2:3,1] %Output[2:3,2] %Output[2:3,3] %Output[2:3,4] %Output[2:4,0] %Output[2:4,1] %Output[2:4,2] %Output[2:4,3] %Output[2:4,4] -"checks ""and balances""" 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 -mitvert\nand fitvert 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 -midhoriz 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 -rdiag 0 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 -ldiag 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 -random 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0 1 1 0 0 0 0 0 0 0 0 0 diff --git a/tensor/table/typegen.go b/tensor/table/typegen.go deleted file mode 100644 index 92eec5f9c5..0000000000 --- a/tensor/table/typegen.go +++ /dev/null @@ -1,11 +0,0 @@ -// Code generated by "core generate"; DO NOT EDIT. - -package table - -import ( - "cogentcore.org/core/types" -) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/tensor/table.IndexView", IDName: "index-view", Doc: "IndexView is an indexed wrapper around an table.Table that provides a\nspecific view onto the Table defined by the set of indexes.\nThis provides an efficient way of sorting and filtering a table by only\nupdating the indexes while doing nothing to the Table itself.\nTo produce a table that has data actually organized according to the\nindexed order, call the NewTable method.\nIndexView views on a table can also be organized together as Splits\nof the table rows, e.g., by grouping values along a given column.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Methods: []types.Method{{Name: "Sequential", Doc: "Sequential sets indexes to sequential row-wise indexes into table", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "SortColumnName", Doc: "SortColumnName sorts the indexes into our Table according to values in\ngiven column name, using either ascending or descending order.\nOnly valid for 1-dimensional columns.\nReturns error if column name not found.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"column", "ascending"}, Returns: []string{"error"}}, {Name: "FilterColumnName", Doc: "FilterColumnName filters the indexes into our Table according to values in\ngiven column name, using string representation of column values.\nIncludes rows with matching values unless exclude is set.\nIf contains, only checks if row contains string; if ignoreCase, ignores case.\nUse named args for greater clarity.\nOnly valid for 1-dimensional columns.\nReturns error if column name not found.", Directives: []types.Directive{{Tool: "types", 
Directive: "add"}}, Args: []string{"column", "str", "exclude", "contains", "ignoreCase"}, Returns: []string{"error"}}, {Name: "AddRows", Doc: "AddRows adds n rows to end of underlying Table, and to the indexes in this view", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"n"}}, {Name: "SaveCSV", Doc: "SaveCSV writes a table index view to a comma-separated-values (CSV) file\n(where comma = any delimiter, specified in the delim arg).\nIf headers = true then generate column headers that capture the type\nand tensor cell geometry of the columns, enabling full reloading\nof exactly the same table format and data (recommended).\nOtherwise, only the data is written.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"filename", "delim", "headers"}, Returns: []string{"error"}}, {Name: "OpenCSV", Doc: "OpenCSV reads a table idx view from a comma-separated-values (CSV) file\n(where comma = any delimiter, specified in the delim arg),\nusing the Go standard encoding/csv reader conforming to the official CSV standard.\nIf the table does not currently have any columns, the first row of the file\nis assumed to be headers, and columns are constructed therefrom.\nIf the file was saved from table with headers, then these have full configuration\ninformation for tensor type and dimensionality.\nIf the table DOES have existing columns, then those are used robustly\nfor whatever information fits from each row of the file.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"filename", "delim"}, Returns: []string{"error"}}}, Fields: []types.Field{{Name: "Table", Doc: "Table that we are an indexed view onto"}, {Name: "Indexes", Doc: "current indexes into Table"}, {Name: "lessFunc", Doc: "current Less function used in sorting"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/tensor/table.Table", IDName: "table", Doc: "Table is a table of data, with columns of tensors,\neach with the same number of Rows (outer-most dimension).", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Methods: []types.Method{{Name: "SaveCSV", Doc: "SaveCSV writes a table to a comma-separated-values (CSV) file\n(where comma = any delimiter, specified in the delim arg).\nIf headers = true then generate column headers that capture the type\nand tensor cell geometry of the columns, enabling full reloading\nof exactly the same table format and data (recommended).\nOtherwise, only the data is written.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"filename", "delim", "headers"}, Returns: []string{"error"}}, {Name: "OpenCSV", Doc: "OpenCSV reads a table from a comma-separated-values (CSV) file\n(where comma = any delimiter, specified in the delim arg),\nusing the Go standard encoding/csv reader conforming to the official CSV standard.\nIf the table does not currently have any columns, the first row of the file\nis assumed to be headers, and columns are constructed therefrom.\nIf the file was saved from table with headers, then these have full configuration\ninformation for tensor type and dimensionality.\nIf the table DOES have existing columns, then those are used robustly\nfor whatever information fits from each row of the file.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"filename", "delim"}, Returns: []string{"error"}}, {Name: "AddRows", Doc: "AddRows adds n rows to each of the columns", Directives: []types.Directive{{Tool: "types", 
Directive: "add"}}, Args: []string{"n"}}, {Name: "SetNumRows", Doc: "SetNumRows sets the number of rows in the table, across all columns\nif rows = 0 then effective number of rows in tensors is 1, as this dim cannot be 0", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"rows"}, Returns: []string{"Table"}}}, Fields: []types.Field{{Name: "Columns", Doc: "columns of data, as tensor.Tensor tensors"}, {Name: "ColumnNames", Doc: "the names of the columns"}, {Name: "Rows", Doc: "number of rows, which is enforced to be the size of the outer-most dimension of the column tensors"}, {Name: "ColumnNameMap", Doc: "the map of column names to column numbers"}, {Name: "MetaData", Doc: "misc meta data for the table. We use lower-case key names following the struct tag convention: name = name of table; desc = description; read-only = gui is read-only; precision = n for precision to write out floats in csv. For Column-specific data, we look for ColumnName: prefix, specifically ColumnName:desc = description of the column contents, which is shown as tooltip in the tensorcore.Table, and :width for width of a column"}}}) diff --git a/tensor/table/util.go b/tensor/table/util.go deleted file mode 100644 index a631a168cd..0000000000 --- a/tensor/table/util.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package table - -import ( - "errors" - "fmt" - "reflect" - "strings" - - "cogentcore.org/core/tensor" -) - -// InsertKeyColumns returns a copy of the given Table with new columns -// having given values, inserted at the start, used as legend keys etc. -// args must be in pairs: column name, value. All rows get the same value. -func (dt *Table) InsertKeyColumns(args ...string) *Table { - n := len(args) - if n%2 != 0 { - fmt.Println("InsertKeyColumns requires even number of args as column name, value pairs") - return dt - } - c := dt.Clone() - nc := n / 2 - for j := range nc { - colNm := args[2*j] - val := args[2*j+1] - col := tensor.NewString([]int{c.Rows}) - c.InsertColumn(col, colNm, 0) - for i := range col.Values { - col.Values[i] = val - } - } - return c -} - -// ConfigFromTable configures the columns of this table according to the -// values in the first two columns of given format table, conventionally named -// Name, Type (but names are not used), which must be of the string type. -func (dt *Table) ConfigFromTable(ft *Table) error { - nmcol := ft.Columns[0] - tycol := ft.Columns[1] - var errs []error - for i := range ft.Rows { - name := nmcol.String1D(i) - typ := strings.ToLower(tycol.String1D(i)) - kind := reflect.Float64 - switch typ { - case "string": - kind = reflect.String - case "bool": - kind = reflect.Bool - case "float32": - kind = reflect.Float32 - case "float64": - kind = reflect.Float64 - case "int": - kind = reflect.Int - case "int32": - kind = reflect.Int32 - case "byte", "uint8": - kind = reflect.Uint8 - default: - err := fmt.Errorf("ConfigFromTable: type string %q not recognized", typ) - errs = append(errs, err) - } - dt.AddColumnOfType(kind, name) - } - return errors.Join(errs...) -} diff --git a/tensor/tensor.go b/tensor/tensor.go deleted file mode 100644 index d0953711a0..0000000000 --- a/tensor/tensor.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package tensor - -//go:generate core generate - -import ( - "fmt" - "reflect" - - "gonum.org/v1/gonum/mat" -) - -// todo: add a conversion function to copy data from Column-Major to a tensor: -// It is also possible to use Column-Major order, which is used in R, Julia, and MATLAB -// where the inner-most index is first and outer-most last. - -// Tensor is the interface for n-dimensional tensors. -// Per C / Go / Python conventions, indexes are Row-Major, ordered from -// outer to inner left-to-right, so the inner-most is right-most. -// It is implemented by the Base and Number generic types specialized -// by different concrete types: float64, float32, int, int32, byte, -// string, bits (bools). -// For float32 and float64 values, use NaN to indicate missing values. -// All of the data analysis and plot packages skip NaNs. -type Tensor interface { - fmt.Stringer - mat.Matrix - - // Shape returns a pointer to the shape that fully parametrizes the tensor shape - Shape() *Shape - - // Len returns the number of elements in the tensor (product of shape dimensions). - Len() int - - // NumDims returns the total number of dimensions. - NumDims() int - - // DimSize returns size of given dimension - DimSize(dim int) int - - // RowCellSize returns the size of the outer-most Row shape dimension, - // and the size of all the remaining inner dimensions (the "cell" size). - // Used for Tensors that are columns in a data table. - RowCellSize() (rows, cells int) - - // DataType returns the type of the data elements in the tensor. - // Bool is returned for the Bits tensor type. - DataType() reflect.Kind - - // Sizeof returns the number of bytes contained in the Values of this tensor. - // for String types, this is just the string pointers. - Sizeof() int64 - - // Bytes returns the underlying byte representation of the tensor values. - // This is the actual underlying data, so make a copy if it can be - // unintentionally modified or retained more than for immediate use. - Bytes() []byte - - // returns true if the data type is a String. otherwise is numeric. - IsString() bool - - // Float returns the value of given index as a float64. - Float(i []int) float64 - - // SetFloat sets the value of given index as a float64 - SetFloat(i []int, val float64) - - // NOTE: String conflicts with [fmt.Stringer], so we have to use StringValue - - // StringValue returns the value of given index as a string - StringValue(i []int) string - - // SetString sets the value of given index as a string - SetString(i []int, val string) - - // Float1D returns the value of given 1-dimensional index (0-Len()-1) as a float64 - Float1D(i int) float64 - - // SetFloat1D sets the value of given 1-dimensional index (0-Len()-1) as a float64 - SetFloat1D(i int, val float64) - - // FloatRowCell returns the value at given row and cell, where row is outer-most dim, - // and cell is 1D index into remaining inner dims. For Table columns. - FloatRowCell(row, cell int) float64 - - // SetFloatRowCell sets the value at given row and cell, where row is outer-most dim, - // and cell is 1D index into remaining inner dims. For Table columns. - SetFloatRowCell(row, cell int, val float64) - - // Floats sets []float64 slice of all elements in the tensor - // (length is ensured to be sufficient). - // This can be used for all of the gonum/floats methods - // for basic math, gonum/stats, etc. - Floats(flt *[]float64) - - // SetFloats sets tensor values from a []float64 slice (copies values). 
- SetFloats(vals []float64) - - // String1D returns the value of given 1-dimensional index (0-Len()-1) as a string - String1D(i int) string - - // SetString1D sets the value of given 1-dimensional index (0-Len()-1) as a string - SetString1D(i int, val string) - - // StringRowCell returns the value at given row and cell, where row is outer-most dim, - // and cell is 1D index into remaining inner dims. For Table columns - StringRowCell(row, cell int) string - - // SetStringRowCell sets the value at given row and cell, where row is outer-most dim, - // and cell is 1D index into remaining inner dims. For Table columns - SetStringRowCell(row, cell int, val string) - - // SubSpace returns a new tensor with innermost subspace at given - // offset(s) in outermost dimension(s) (len(offs) < NumDims). - // The new tensor points to the values of the this tensor (i.e., modifications - // will affect both), as its Values slice is a view onto the original (which - // is why only inner-most contiguous supsaces are supported). - // Use Clone() method to separate the two. - SubSpace(offs []int) Tensor - - // Range returns the min, max (and associated indexes, -1 = no values) for the tensor. - // This is needed for display and is thus in the core api in optimized form - // Other math operations can be done using gonum/floats package. - Range() (min, max float64, minIndex, maxIndex int) - - // SetZeros is simple convenience function initialize all values to 0 - SetZeros() - - // Clone clones this tensor, creating a duplicate copy of itself with its - // own separate memory representation of all the values, and returns - // that as a Tensor (which can be converted into the known type as needed). - Clone() Tensor - - // CopyFrom copies all avail values from other tensor into this tensor, with an - // optimized implementation if the other tensor is of the same type, and - // otherwise it goes through appropriate standard type. - CopyFrom(from Tensor) - - // CopyShapeFrom copies just the shape from given source tensor - // calling SetShape with the shape params from source (see for more docs). - CopyShapeFrom(from Tensor) - - // CopyCellsFrom copies given range of values from other tensor into this tensor, - // using flat 1D indexes: to = starting index in this Tensor to start copying into, - // start = starting index on from Tensor to start copying from, and n = number of - // values to copy. Uses an optimized implementation if the other tensor is - // of the same type, and otherwise it goes through appropriate standard type. - CopyCellsFrom(from Tensor, to, start, n int) - - // SetShape sets the sizes parameters of the tensor, and resizes backing storage appropriately. - // existing names will be preserved if not presented. - SetShape(sizes []int, names ...string) - - // SetNumRows sets the number of rows (outer-most dimension). - SetNumRows(rows int) - - // SetMetaData sets a key=value meta data (stored as a map[string]string). 
- // For TensorGrid display: top-zero=+/-, odd-row=+/-, image=+/-, - // min, max set fixed min / max values, background=color - SetMetaData(key, val string) - - // MetaData retrieves value of given key, bool = false if not set - MetaData(key string) (string, bool) - - // MetaDataMap returns the underlying map used for meta data - MetaDataMap() map[string]string - - // CopyMetaData copies meta data from given source tensor - CopyMetaData(from Tensor) -} - -// New returns a new n-dimensional tensor of given value type -// with the given sizes per dimension (shape), and optional dimension names. -func New[T string | bool | float32 | float64 | int | int32 | byte](sizes []int, names ...string) Tensor { - var v T - switch any(v).(type) { - case string: - return NewString(sizes, names...) - case bool: - return NewBits(sizes, names...) - case float64: - return NewNumber[float64](sizes, names...) - case float32: - return NewNumber[float32](sizes, names...) - case int: - return NewNumber[int](sizes, names...) - case int32: - return NewNumber[int32](sizes, names...) - case byte: - return NewNumber[byte](sizes, names...) - default: - panic("tensor.New: unexpected error: type not supported") - } -} - -// NewOfType returns a new n-dimensional tensor of given reflect.Kind type -// with the given sizes per dimension (shape), and optional dimension names. -// Supported types are string, bool (for [Bits]), float32, float64, int, int32, and byte. -func NewOfType(typ reflect.Kind, sizes []int, names ...string) Tensor { - switch typ { - case reflect.String: - return NewString(sizes, names...) - case reflect.Bool: - return NewBits(sizes, names...) - case reflect.Float64: - return NewNumber[float64](sizes, names...) - case reflect.Float32: - return NewNumber[float32](sizes, names...) - case reflect.Int: - return NewNumber[int](sizes, names...) - case reflect.Int32: - return NewNumber[int32](sizes, names...) - case reflect.Uint8: - return NewNumber[byte](sizes, names...) - default: - panic(fmt.Sprintf("tensor.NewOfType: type not supported: %v", typ)) - } -} - -// CopyDense copies a gonum mat.Dense matrix into given Tensor -// using standard Float64 interface -func CopyDense(to Tensor, dm *mat.Dense) { - nr, nc := dm.Dims() - to.SetShape([]int{nr, nc}) - idx := 0 - for ri := 0; ri < nr; ri++ { - for ci := 0; ci < nc; ci++ { - v := dm.At(ri, ci) - to.SetFloat1D(idx, v) - idx++ - } - } -} diff --git a/tensor/tensor_test.go b/tensor/tensor_test.go deleted file mode 100644 index 2efa6c1ba4..0000000000 --- a/tensor/tensor_test.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2024, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tensor - -import ( - "reflect" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestTensorString(t *testing.T) { - shp := []int{4, 2} - nms := []string{"Row", "Vals"} - tsr := New[string](shp, nms...) 
- assert.Equal(t, 8, tsr.Len()) - assert.Equal(t, true, tsr.IsString()) - assert.Equal(t, reflect.String, tsr.DataType()) - assert.Equal(t, 2, tsr.SubSpace([]int{0}).Len()) - r, c := tsr.RowCellSize() - assert.Equal(t, 4, r) - assert.Equal(t, 2, c) - - tsr.SetString([]int{2, 0}, "test") - assert.Equal(t, "test", tsr.StringValue([]int{2, 0})) - tsr.SetString1D(5, "testing") - assert.Equal(t, "testing", tsr.StringValue([]int{2, 1})) - assert.Equal(t, "test", tsr.String1D(4)) - - assert.Equal(t, "test", tsr.StringRowCell(2, 0)) - assert.Equal(t, "testing", tsr.StringRowCell(2, 1)) - assert.Equal(t, "", tsr.StringRowCell(3, 0)) - - cln := tsr.Clone() - assert.Equal(t, "testing", cln.StringValue([]int{2, 1})) - - cln.SetZeros() - assert.Equal(t, "", cln.StringValue([]int{2, 1})) - assert.Equal(t, "testing", tsr.StringValue([]int{2, 1})) - - tsr.SetShape([]int{2, 4}, "Vals", "Row") - assert.Equal(t, "test", tsr.StringValue([]int{1, 0})) - assert.Equal(t, "testing", tsr.StringValue([]int{1, 1})) - - cln.SetString1D(5, "ctesting") - cln.CopyShapeFrom(tsr) - assert.Equal(t, "ctesting", cln.StringValue([]int{1, 1})) - - cln.CopyCellsFrom(tsr, 5, 4, 2) - assert.Equal(t, "test", cln.StringValue([]int{1, 1})) - assert.Equal(t, "testing", cln.StringValue([]int{1, 2})) - - tsr.SetNumRows(5) - assert.Equal(t, 20, tsr.Len()) - - tsr.SetMetaData("name", "test") - nm, has := tsr.MetaData("name") - assert.Equal(t, "test", nm) - assert.Equal(t, true, has) - _, has = tsr.MetaData("type") - assert.Equal(t, false, has) - - var flt []float64 - cln.SetString1D(0, "3.14") - assert.Equal(t, 3.14, cln.Float1D(0)) - - cln.Floats(&flt) - assert.Equal(t, 3.14, flt[0]) - assert.Equal(t, 0.0, flt[1]) -} - -func TestTensorFloat64(t *testing.T) { - shp := []int{4, 2} - nms := []string{"Row", "Vals"} - tsr := New[float64](shp, nms...) 
- assert.Equal(t, 8, tsr.Len()) - assert.Equal(t, false, tsr.IsString()) - assert.Equal(t, reflect.Float64, tsr.DataType()) - assert.Equal(t, 2, tsr.SubSpace([]int{0}).Len()) - r, c := tsr.RowCellSize() - assert.Equal(t, 4, r) - assert.Equal(t, 2, c) - - tsr.SetFloat([]int{2, 0}, 3.14) - assert.Equal(t, 3.14, tsr.Float([]int{2, 0})) - tsr.SetFloat1D(5, 2.17) - assert.Equal(t, 2.17, tsr.Float([]int{2, 1})) - assert.Equal(t, 3.14, tsr.Float1D(4)) - - assert.Equal(t, 3.14, tsr.FloatRowCell(2, 0)) - assert.Equal(t, 2.17, tsr.FloatRowCell(2, 1)) - assert.Equal(t, 0.0, tsr.FloatRowCell(3, 0)) - - cln := tsr.Clone() - assert.Equal(t, 2.17, cln.Float([]int{2, 1})) - - cln.SetZeros() - assert.Equal(t, 0.0, cln.Float([]int{2, 1})) - assert.Equal(t, 2.17, tsr.Float([]int{2, 1})) - - tsr.SetShape([]int{2, 4}, "Vals", "Row") - assert.Equal(t, 3.14, tsr.Float([]int{1, 0})) - assert.Equal(t, 2.17, tsr.Float([]int{1, 1})) - - cln.SetFloat1D(5, 9.9) - cln.CopyShapeFrom(tsr) - assert.Equal(t, 9.9, cln.Float([]int{1, 1})) - - cln.CopyCellsFrom(tsr, 5, 4, 2) - assert.Equal(t, 3.14, cln.Float([]int{1, 1})) - assert.Equal(t, 2.17, cln.Float([]int{1, 2})) - - tsr.SetNumRows(5) - assert.Equal(t, 20, tsr.Len()) - - tsr.SetMetaData("name", "test") - nm, has := tsr.MetaData("name") - assert.Equal(t, "test", nm) - assert.Equal(t, true, has) - _, has = tsr.MetaData("type") - assert.Equal(t, false, has) - - var flt []float64 - cln.SetString1D(0, "3.14") - assert.Equal(t, 3.14, cln.Float1D(0)) - - cln.Floats(&flt) - assert.Equal(t, 3.14, flt[0]) - assert.Equal(t, 0.0, flt[1]) -} diff --git a/tensor/tensorcore/README.md b/tensor/tensorcore/README.md deleted file mode 100644 index 2938d56947..0000000000 --- a/tensor/tensorcore/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# tensorcore - -`tensorcore` provides GUI Views of `table.Table` and `tensor.Tensor` structures as Cogent Core Widgets. - -Add this to `import` to get these views to be registered with the Cogent Core Value system: - -```Go - _ "cogentcore.org/core/tensor/tensorcore" // include to get GUI views -``` - -* `Table` provides a row-and-column tabular GUI interface, similar to a spreadsheet, for viewing and editing Table data. Any higher-dimensional tensor columns are shown as TensorGrid elements that can be clicked to open a TensorView editor with actual numeric values in a similar spreadsheet-like GUI. - -* `TensorView` provides a spreadsheet-like GUI for viewing and editing tensor data. - -* `TensorGrid` provides a 2D colored grid display of tensor data, collapsing any higher dimensions down to 2D. Different core.ColorMaps can be used to translate values into colors. - diff --git a/tensor/tensorcore/doc.go b/tensor/tensorcore/doc.go deleted file mode 100644 index 070d6429c6..0000000000 --- a/tensor/tensorcore/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) 2019, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package tensorcore provides GUI Views of table Table and Tensor structures -using the Cogent Core views framework. - -* Table provides a row-and-column tabular GUI interface, similar to -a spreadsheet, for viewing and editing Table data. Any higher-dimensional -tensor columns are shown as TensorGrid elements that can be clicked to -open a TensorView editor with actual numeric values in a similar spreadsheet- -like GUI. - -* TensorView provides a spreadsheet-like GUI for viewing and editing -tensor data. 
- -* TensorGrid provides a 2D colored grid display of tensor data, collapsing -any higher dimensions down to 2D. Different core.ColorMaps can be used -to translate values into colors. -*/ -package tensorcore diff --git a/tensor/tensorcore/simatgrid.go b/tensor/tensorcore/simatgrid.go deleted file mode 100644 index a134c3199f..0000000000 --- a/tensor/tensorcore/simatgrid.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright (c) 2019, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tensorcore - -import ( - "cogentcore.org/core/colors" - "cogentcore.org/core/math32" - "cogentcore.org/core/paint" - "cogentcore.org/core/styles" - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/stats/simat" -) - -const LabelSpace = float32(8) - -// SimMatGrid is a widget that displays a similarity / distance matrix -// with tensor values as a grid of colored squares, and labels for rows and columns. -type SimMatGrid struct { //types:add - TensorGrid - - // the similarity / distance matrix - SimMat *simat.SimMat `set:"-"` - - rowMaxSz math32.Vector2 // maximum label size - rowMinBlank int // minimum number of blank rows - rowNGps int // number of groups in row (non-blank after blank) - colMaxSz math32.Vector2 // maximum label size - colMinBlank int // minimum number of blank cols - colNGps int // number of groups in col (non-blank after blank) -} - -// Defaults sets defaults for values that are at nonsensical initial values -func (tg *SimMatGrid) Init() { - tg.TensorGrid.Init() - tg.Display.GridView = &tg.TensorGrid - tg.Display.Defaults() - tg.Display.TopZero = true - -} - -// SetSimMat sets the similarity matrix and triggers a display update -func (tg *SimMatGrid) SetSimMat(smat *simat.SimMat) *SimMatGrid { - tg.SimMat = smat - tg.Tensor = smat.Mat - if tg.Tensor != nil { - tg.Display.FromMeta(tg.Tensor) - } - tg.Update() - return tg -} - -func (tg *SimMatGrid) SizeLabel(lbs []string, col bool) (minBlank, ngps int, sz math32.Vector2) { - mx := 0 - mxi := 0 - minBlank = len(lbs) - if minBlank == 0 { - return - } - curblk := 0 - ngps = 0 - for i, lb := range lbs { - l := len(lb) - if l == 0 { - curblk++ - } else { - if curblk > 0 { - ngps++ - } - if i > 0 { - minBlank = min(minBlank, curblk) - } - curblk = 0 - if l > mx { - mx = l - mxi = i - } - } - } - minBlank = min(minBlank, curblk) - tr := paint.Text{} - fr := tg.Styles.FontRender() - if col { - tr.SetStringRot90(lbs[mxi], fr, &tg.Styles.UnitContext, &tg.Styles.Text, true, 0) - } else { - tr.SetString(lbs[mxi], fr, &tg.Styles.UnitContext, &tg.Styles.Text, true, 0, 0) - } - tsz := tg.Geom.Size.Actual.Content - if !col { - tr.LayoutStdLR(&tg.Styles.Text, fr, &tg.Styles.UnitContext, tsz) - } - return minBlank, ngps, tr.BBox.Size() -} - -func (tg *SimMatGrid) SizeUp() { - tg.rowMinBlank, tg.rowNGps, tg.rowMaxSz = tg.SizeLabel(tg.SimMat.Rows, false) - tg.colMinBlank, tg.colNGps, tg.colMaxSz = tg.SizeLabel(tg.SimMat.Columns, true) - - tg.colMaxSz.Y += tg.rowMaxSz.Y // needs one more for some reason - - rtxtsz := tg.rowMaxSz.Y / float32(tg.rowMinBlank+1) - ctxtsz := tg.colMaxSz.X / float32(tg.colMinBlank+1) - txtsz := math32.Max(rtxtsz, ctxtsz) - - rows, cols, _, _ := tensor.Projection2DShape(tg.Tensor.Shape(), tg.Display.OddRow) - rowEx := tg.rowNGps - colEx := tg.colNGps - frw := float32(rows) + float32(rowEx)*tg.Display.DimExtra // extra spacing - fcl := float32(cols) + float32(colEx)*tg.Display.DimExtra // extra spacing - max := float32(math32.Max(frw, 
fcl)) - gsz := tg.Display.TotPrefSize / max - gsz = math32.Max(gsz, tg.Display.GridMinSize) - gsz = math32.Max(gsz, txtsz) - gsz = math32.Min(gsz, tg.Display.GridMaxSize) - minsz := math32.Vec2(tg.rowMaxSz.X+LabelSpace+gsz*float32(cols), tg.colMaxSz.Y+LabelSpace+gsz*float32(rows)) - sz := &tg.Geom.Size - sz.FitSizeMax(&sz.Actual.Content, minsz) -} - -func (tg *SimMatGrid) Render() { - if tg.SimMat == nil || tg.SimMat.Mat.Len() == 0 { - return - } - tg.EnsureColorMap() - tg.UpdateRange() - pc := &tg.Scene.PaintContext - - pos := tg.Geom.Pos.Content - sz := tg.Geom.Size.Actual.Content - - effsz := sz - effsz.X -= tg.rowMaxSz.X + LabelSpace - effsz.Y -= tg.colMaxSz.Y + LabelSpace - - pc.FillBox(pos, sz, tg.Styles.Background) - - tsr := tg.SimMat.Mat - - rows, cols, _, _ := tensor.Projection2DShape(tsr.Shape(), tg.Display.OddRow) - rowEx := tg.rowNGps - colEx := tg.colNGps - frw := float32(rows) + float32(rowEx)*tg.Display.DimExtra // extra spacing - fcl := float32(cols) + float32(colEx)*tg.Display.DimExtra // extra spacing - tsz := math32.Vec2(fcl, frw) - gsz := effsz.Div(tsz) - - // Render Rows - epos := pos - epos.Y += tg.colMaxSz.Y + LabelSpace - nr := len(tg.SimMat.Rows) - mx := min(nr, rows) - tr := paint.Text{} - txsty := tg.Styles.Text - txsty.AlignV = styles.Start - ygp := 0 - prvyblk := false - fr := tg.Styles.FontRender() - for y := 0; y < mx; y++ { - lb := tg.SimMat.Rows[y] - if len(lb) == 0 { - prvyblk = true - continue - } - if prvyblk { - ygp++ - prvyblk = false - } - yex := float32(ygp) * tg.Display.DimExtra - tr.SetString(lb, fr, &tg.Styles.UnitContext, &txsty, true, 0, 0) - tr.LayoutStdLR(&txsty, fr, &tg.Styles.UnitContext, tg.rowMaxSz) - cr := math32.Vec2(0, float32(y)+yex) - pr := epos.Add(cr.Mul(gsz)) - tr.Render(pc, pr) - } - - // Render Cols - epos = pos - epos.X += tg.rowMaxSz.X + LabelSpace - nc := len(tg.SimMat.Columns) - mx = min(nc, cols) - xgp := 0 - prvxblk := false - for x := 0; x < mx; x++ { - lb := tg.SimMat.Columns[x] - if len(lb) == 0 { - prvxblk = true - continue - } - if prvxblk { - xgp++ - prvxblk = false - } - xex := float32(xgp) * tg.Display.DimExtra - tr.SetStringRot90(lb, fr, &tg.Styles.UnitContext, &tg.Styles.Text, true, 0) - cr := math32.Vec2(float32(x)+xex, 0) - pr := epos.Add(cr.Mul(gsz)) - tr.Render(pc, pr) - } - - pos.X += tg.rowMaxSz.X + LabelSpace - pos.Y += tg.colMaxSz.Y + LabelSpace - ssz := gsz.MulScalar(tg.Display.GridFill) // smaller size with margin - prvyblk = false - ygp = 0 - for y := 0; y < rows; y++ { - ylb := tg.SimMat.Rows[y] - if len(ylb) > 0 && prvyblk { - ygp++ - prvyblk = false - } - yex := float32(ygp) * tg.Display.DimExtra - prvxblk = false - xgp = 0 - for x := 0; x < cols; x++ { - xlb := tg.SimMat.Columns[x] - if len(xlb) > 0 && prvxblk { - xgp++ - prvxblk = false - } - xex := float32(xgp) * tg.Display.DimExtra - ey := y - if !tg.Display.TopZero { - ey = (rows - 1) - y - } - val := tensor.Projection2DValue(tsr, tg.Display.OddRow, ey, x) - cr := math32.Vec2(float32(x)+xex, float32(y)+yex) - pr := pos.Add(cr.Mul(gsz)) - _, clr := tg.Color(val) - pc.FillBox(pr, ssz, colors.Uniform(clr)) - if len(xlb) == 0 { - prvxblk = true - } - } - if len(ylb) == 0 { - prvyblk = true - } - } -} diff --git a/tensor/tensorcore/table.go b/tensor/tensorcore/table.go deleted file mode 100644 index 1bfe75e996..0000000000 --- a/tensor/tensorcore/table.go +++ /dev/null @@ -1,733 +0,0 @@ -// Copyright (c) 2023, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
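The tensorcore files deleted in this change (README, doc.go, simatgrid.go, and the table.go widget whose header begins here) provided the GUI views onto tables and tensors. For context, a minimal sketch of how the removed `Table` widget was typically attached to a `table.Table`; it assumes the generated `NewTable(parent)` widget constructor and the standard Cogent Core body setup, neither of which is shown in this diff.

```Go
package main

import (
	"cogentcore.org/core/core"
	"cogentcore.org/core/tensor/table"
	"cogentcore.org/core/tensor/tensorcore"
)

func main() {
	b := core.NewBody("tensorcore table demo")

	// data table to display
	dt := table.NewTable()
	dt.AddStringColumn("Name")
	dt.AddFloat64Column("Score")
	dt.SetNumRows(3)

	// SetTable wraps the table in a sequential IndexView and configures the view
	tensorcore.NewTable(b).SetTable(dt)

	b.RunMainWindow()
}
```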
- -// Package tensorcore provides GUI Cogent Core widgets for tensor types. -package tensorcore - -//go:generate core generate - -import ( - "bytes" - "encoding/csv" - "fmt" - "image" - "log" - "strconv" - "strings" - - "cogentcore.org/core/base/errors" - "cogentcore.org/core/base/fileinfo" - "cogentcore.org/core/base/fileinfo/mimedata" - "cogentcore.org/core/core" - "cogentcore.org/core/events" - "cogentcore.org/core/icons" - "cogentcore.org/core/styles" - "cogentcore.org/core/styles/states" - "cogentcore.org/core/styles/units" - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/table" - "cogentcore.org/core/tree" -) - -// Table provides a GUI widget for representing [table.Table] values. -type Table struct { - core.ListBase - - // the idx view of the table that we're a view of - Table *table.IndexView `set:"-"` - - // overall display options for tensor display - TensorDisplay TensorDisplay `set:"-"` - - // per column tensor display params - ColumnTensorDisplay map[int]*TensorDisplay `set:"-"` - - // per column blank tensor values - ColumnTensorBlank map[int]*tensor.Float64 `set:"-"` - - // number of columns in table (as of last update) - NCols int `edit:"-"` - - // current sort index - SortIndex int - - // whether current sort order is descending - SortDescending bool - - // headerWidths has number of characters in each header, per visfields - headerWidths []int `copier:"-" display:"-" json:"-" xml:"-"` - - // colMaxWidths records maximum width in chars of string type fields - colMaxWidths []int `set:"-" copier:"-" json:"-" xml:"-"` - - // blank values for out-of-range rows - BlankString string - BlankFloat float64 -} - -// check for interface impl -var _ core.Lister = (*Table)(nil) - -func (tb *Table) Init() { - tb.ListBase.Init() - tb.SortIndex = -1 - tb.TensorDisplay.Defaults() - tb.ColumnTensorDisplay = map[int]*TensorDisplay{} - tb.ColumnTensorBlank = map[int]*tensor.Float64{} - - tb.Makers.Normal[0] = func(p *tree.Plan) { // TODO: reduce redundancy with ListBase Maker - svi := tb.This.(core.Lister) - svi.UpdateSliceSize() - - scrollTo := -1 - if tb.InitSelectedIndex >= 0 { - tb.SelectedIndex = tb.InitSelectedIndex - tb.InitSelectedIndex = -1 - scrollTo = tb.SelectedIndex - } - if scrollTo >= 0 { - tb.ScrollToIndex(scrollTo) - } - - tb.UpdateStartIndex() - tb.UpdateMaxWidths() - - tb.Updater(func() { - tb.UpdateStartIndex() - }) - - tb.MakeHeader(p) - tb.MakeGrid(p, func(p *tree.Plan) { - for i := 0; i < tb.VisibleRows; i++ { - svi.MakeRow(p, i) - } - }) - } -} - -func (tb *Table) SliceIndex(i int) (si, vi int, invis bool) { - si = tb.StartIndex + i - vi = -1 - if si < len(tb.Table.Indexes) { - vi = tb.Table.Indexes[si] - } - invis = vi < 0 - return -} - -// StyleValue performs additional value widget styling -func (tb *Table) StyleValue(w core.Widget, s *styles.Style, row, col int) { - hw := float32(tb.headerWidths[col]) - if col == tb.SortIndex { - hw += 6 - } - if len(tb.colMaxWidths) > col { - hw = max(float32(tb.colMaxWidths[col]), hw) - } - hv := units.Ch(hw) - s.Min.X.Value = max(s.Min.X.Value, hv.Convert(s.Min.X.Unit, &s.UnitContext).Value) - s.SetTextWrap(false) -} - -// SetTable sets the source table that we are viewing, using a sequential IndexView -// and then configures the display -func (tb *Table) SetTable(et *table.Table) *Table { - if et == nil { - return nil - } - - tb.Table = table.NewIndexView(et) - tb.This.(core.Lister).UpdateSliceSize() - tb.SetSliceBase() - tb.Update() - return tb -} - -// SetSlice sets the source table to a [table.NewSliceTable] -// 
from the given slice. -func (tb *Table) SetSlice(sl any) *Table { - return tb.SetTable(errors.Log1(table.NewSliceTable(sl))) -} - -// AsyncUpdateTable updates the display for asynchronous updating from -// other goroutines. Also updates indexview (calling Sequential). -func (tb *Table) AsyncUpdateTable() { - tb.AsyncLock() - tb.Table.Sequential() - tb.ScrollToIndexNoUpdate(tb.SliceSize - 1) - tb.Update() - tb.AsyncUnlock() -} - -// SetIndexView sets the source IndexView of a table (using a copy so original is not modified) -// and then configures the display -func (tb *Table) SetIndexView(ix *table.IndexView) *Table { - if ix == nil { - return tb - } - - tb.Table = ix.Clone() // always copy - - tb.This.(core.Lister).UpdateSliceSize() - tb.StartIndex = 0 - tb.VisibleRows = tb.MinRows - if !tb.IsReadOnly() { - tb.SelectedIndex = -1 - } - tb.ResetSelectedIndexes() - tb.SelectMode = false - tb.MakeIter = 0 - tb.Update() - return tb -} - -func (tb *Table) UpdateSliceSize() int { - tb.Table.DeleteInvalid() // table could have changed - if tb.Table.Len() == 0 { - tb.Table.Sequential() - } - tb.SliceSize = tb.Table.Len() - tb.NCols = tb.Table.Table.NumColumns() - return tb.SliceSize -} - -func (tb *Table) UpdateMaxWidths() { - if len(tb.headerWidths) != tb.NCols { - tb.headerWidths = make([]int, tb.NCols) - tb.colMaxWidths = make([]int, tb.NCols) - } - - if tb.SliceSize == 0 { - return - } - for fli := 0; fli < tb.NCols; fli++ { - tb.colMaxWidths[fli] = 0 - col := tb.Table.Table.Columns[fli] - stsr, isstr := col.(*tensor.String) - - if !isstr { - continue - } - mxw := 0 - for _, ixi := range tb.Table.Indexes { - if ixi >= 0 { - sval := stsr.Values[ixi] - mxw = max(mxw, len(sval)) - } - } - tb.colMaxWidths[fli] = mxw - } -} - -func (tb *Table) MakeHeader(p *tree.Plan) { - tree.AddAt(p, "header", func(w *core.Frame) { - core.ToolbarStyles(w) - w.FinalStyler(func(s *styles.Style) { - s.Padding.Zero() - s.Grow.Set(0, 0) - s.Gap.Set(units.Em(0.5)) // matches grid default - }) - w.Maker(func(p *tree.Plan) { - if tb.ShowIndexes { - tree.AddAt(p, "_head-index", func(w *core.Text) { // TODO: is not working - w.SetType(core.TextBodyMedium) - w.Styler(func(s *styles.Style) { - s.Align.Self = styles.Center - }) - w.SetText("Index") - }) - } - for fli := 0; fli < tb.NCols; fli++ { - field := tb.Table.Table.ColumnNames[fli] - tree.AddAt(p, "head-"+field, func(w *core.Button) { - w.SetType(core.ButtonAction) - w.Styler(func(s *styles.Style) { - s.Justify.Content = styles.Start - }) - w.OnClick(func(e events.Event) { - tb.SortSliceAction(fli) - }) - w.Updater(func() { - field := tb.Table.Table.ColumnNames[fli] - w.SetText(field).SetTooltip(field + " (tap to sort by)") - tb.headerWidths[fli] = len(field) - if fli == tb.SortIndex { - if tb.SortDescending { - w.SetIndicator(icons.KeyboardArrowDown) - } else { - w.SetIndicator(icons.KeyboardArrowUp) - } - } else { - w.SetIndicator(icons.Blank) - } - }) - }) - } - }) - }) -} - -// SliceHeader returns the Frame header for slice grid -func (tb *Table) SliceHeader() *core.Frame { - return tb.Child(0).(*core.Frame) -} - -// RowWidgetNs returns number of widgets per row and offset for index label -func (tb *Table) RowWidgetNs() (nWidgPerRow, idxOff int) { - nWidgPerRow = 1 + tb.NCols - idxOff = 1 - if !tb.ShowIndexes { - nWidgPerRow -= 1 - idxOff = 0 - } - return -} - -func (tb *Table) MakeRow(p *tree.Plan, i int) { - svi := tb.This.(core.Lister) - si, _, invis := svi.SliceIndex(i) - itxt := strconv.Itoa(i) - - if tb.ShowIndexes { - tb.MakeGridIndex(p, i, si, itxt, invis) 
- } - - for fli := 0; fli < tb.NCols; fli++ { - col := tb.Table.Table.Columns[fli] - valnm := fmt.Sprintf("value-%v.%v", fli, itxt) - - _, isstr := col.(*tensor.String) - if col.NumDims() == 1 { - str := "" - fval := float64(0) - tree.AddNew(p, valnm, func() core.Value { - if isstr { - return core.NewValue(&str, "") - } else { - return core.NewValue(&fval, "") - } - }, func(w core.Value) { - wb := w.AsWidget() - tb.MakeValue(w, i) - w.AsTree().SetProperty(core.ListColProperty, fli) - if !tb.IsReadOnly() { - wb.OnChange(func(e events.Event) { - if si < len(tb.Table.Indexes) { - if isstr { - tb.Table.Table.SetStringIndex(fli, tb.Table.Indexes[si], str) - } else { - tb.Table.Table.SetFloatIndex(fli, tb.Table.Indexes[si], fval) - } - } - tb.This.(core.Lister).UpdateMaxWidths() - tb.SendChange() - }) - } - wb.Updater(func() { - _, vi, invis := svi.SliceIndex(i) - if !invis { - if isstr { - str = tb.Table.Table.StringIndex(fli, vi) - core.Bind(&str, w) - } else { - fval = tb.Table.Table.FloatIndex(fli, vi) - core.Bind(&fval, w) - } - } else { - if isstr { - core.Bind(tb.BlankString, w) - } else { - core.Bind(tb.BlankFloat, w) - } - } - wb.SetReadOnly(tb.IsReadOnly()) - wb.SetState(invis, states.Invisible) - if svi.HasStyler() { - w.Style() - } - if invis { - wb.SetSelected(false) - } - }) - }) - } else { - tree.AddAt(p, valnm, func(w *TensorGrid) { - w.SetReadOnly(tb.IsReadOnly()) - wb := w.AsWidget() - w.SetProperty(core.ListRowProperty, i) - w.SetProperty(core.ListColProperty, fli) - w.Styler(func(s *styles.Style) { - s.Grow.Set(0, 0) - }) - wb.Updater(func() { - si, vi, invis := svi.SliceIndex(i) - var cell tensor.Tensor - if invis { - cell = tb.ColTensorBlank(fli, col) - } else { - cell = tb.Table.Table.TensorIndex(fli, vi) - } - wb.ValueTitle = tb.ValueTitle + "[" + strconv.Itoa(si) + "]" - w.SetState(invis, states.Invisible) - w.SetTensor(cell) - w.Display = *tb.GetColumnTensorDisplay(fli) - }) - }) - } - } -} - -// ColTensorBlank returns tensor blanks for given tensor col -func (tb *Table) ColTensorBlank(cidx int, col tensor.Tensor) *tensor.Float64 { - if ctb, has := tb.ColumnTensorBlank[cidx]; has { - return ctb - } - ctb := tensor.New[float64](col.Shape().Sizes, col.Shape().Names...).(*tensor.Float64) - tb.ColumnTensorBlank[cidx] = ctb - return ctb -} - -// GetColumnTensorDisplay returns tensor display parameters for this column -// either the overall defaults or the per-column if set -func (tb *Table) GetColumnTensorDisplay(col int) *TensorDisplay { - if ctd, has := tb.ColumnTensorDisplay[col]; has { - return ctd - } - if tb.Table != nil { - cl := tb.Table.Table.Columns[col] - if len(cl.MetaDataMap()) > 0 { - return tb.SetColumnTensorDisplay(col) - } - } - return &tb.TensorDisplay -} - -// SetColumnTensorDisplay sets per-column tensor display params and returns them -// if already set, just returns them -func (tb *Table) SetColumnTensorDisplay(col int) *TensorDisplay { - if ctd, has := tb.ColumnTensorDisplay[col]; has { - return ctd - } - ctd := &TensorDisplay{} - *ctd = tb.TensorDisplay - if tb.Table != nil { - cl := tb.Table.Table.Columns[col] - ctd.FromMeta(cl) - } - tb.ColumnTensorDisplay[col] = ctd - return ctd -} - -// NewAt inserts a new blank element at given index in the slice -- -1 -// means the end -func (tb *Table) NewAt(idx int) { - tb.NewAtSelect(idx) - - tb.Table.InsertRows(idx, 1) - - tb.SelectIndexEvent(idx, events.SelectOne) - tb.Update() - tb.IndexGrabFocus(idx) -} - -// DeleteAt deletes element at given index from slice -func (tb *Table) DeleteAt(idx int) { - if idx 
< 0 || idx >= tb.SliceSize { - return - } - tb.DeleteAtSelect(idx) - tb.Table.DeleteRows(idx, 1) - tb.Update() -} - -// SortSliceAction sorts the slice for given field index -- toggles ascending -// vs. descending if already sorting on this dimension -func (tb *Table) SortSliceAction(fldIndex int) { - sgh := tb.SliceHeader() - _, idxOff := tb.RowWidgetNs() - - for fli := 0; fli < tb.NCols; fli++ { - hdr := sgh.Child(idxOff + fli).(*core.Button) - hdr.SetType(core.ButtonAction) - if fli == fldIndex { - if tb.SortIndex == fli { - tb.SortDescending = !tb.SortDescending - } else { - tb.SortDescending = false - } - } - } - - tb.SortIndex = fldIndex - if fldIndex == -1 { - tb.Table.SortIndexes() - } else { - tb.Table.SortColumn(tb.SortIndex, !tb.SortDescending) - } - tb.Update() // requires full update due to sort button icon -} - -// TensorDisplayAction allows user to select tensor display options for column -// pass -1 for global params for the entire table -func (tb *Table) TensorDisplayAction(fldIndex int) { - ctd := &tb.TensorDisplay - if fldIndex >= 0 { - ctd = tb.SetColumnTensorDisplay(fldIndex) - } - d := core.NewBody("Tensor grid display options") - core.NewForm(d).SetStruct(ctd) - d.RunFullDialog(tb) - // tv.UpdateSliceGrid() - tb.NeedsRender() -} - -func (tb *Table) HasStyler() bool { return false } - -func (tb *Table) StyleRow(w core.Widget, idx, fidx int) {} - -// SortFieldName returns the name of the field being sorted, along with :up or -// :down depending on descending -func (tb *Table) SortFieldName() string { - if tb.SortIndex >= 0 && tb.SortIndex < tb.NCols { - nm := tb.Table.Table.ColumnNames[tb.SortIndex] - if tb.SortDescending { - nm += ":down" - } else { - nm += ":up" - } - return nm - } - return "" -} - -// SetSortField sets sorting to happen on given field and direction -- see -// SortFieldName for details -func (tb *Table) SetSortFieldName(nm string) { - if nm == "" { - return - } - spnm := strings.Split(nm, ":") - got := false - for fli := 0; fli < tb.NCols; fli++ { - fld := tb.Table.Table.ColumnNames[fli] - if fld == spnm[0] { - got = true - // fmt.Println("sorting on:", fld.Name, fli, "from:", nm) - tb.SortIndex = fli - } - } - if len(spnm) == 2 { - if spnm[1] == "down" { - tb.SortDescending = true - } else { - tb.SortDescending = false - } - } - _ = got - // if got { - // tv.SortSlice() - // } -} - -// RowFirstVisWidget returns the first visible widget for given row (could be -// index or not) -- false if out of range -func (tb *Table) RowFirstVisWidget(row int) (*core.WidgetBase, bool) { - if !tb.IsRowInBounds(row) { - return nil, false - } - nWidgPerRow, idxOff := tb.RowWidgetNs() - lg := tb.ListGrid - w := lg.Children[row*nWidgPerRow].(core.Widget).AsWidget() - if w.Geom.TotalBBox != (image.Rectangle{}) { - return w, true - } - ridx := nWidgPerRow * row - for fli := 0; fli < tb.NCols; fli++ { - w := lg.Child(ridx + idxOff + fli).(core.Widget).AsWidget() - if w.Geom.TotalBBox != (image.Rectangle{}) { - return w, true - } - } - return nil, false -} - -// RowGrabFocus grabs the focus for the first focusable widget in given row -- -// returns that element or nil if not successful -- note: grid must have -// already rendered for focus to be grabbed! 
-func (tb *Table) RowGrabFocus(row int) *core.WidgetBase { - if !tb.IsRowInBounds(row) || tb.InFocusGrab { // range check - return nil - } - nWidgPerRow, idxOff := tb.RowWidgetNs() - ridx := nWidgPerRow * row - lg := tb.ListGrid - // first check if we already have focus - for fli := 0; fli < tb.NCols; fli++ { - w := lg.Child(ridx + idxOff + fli).(core.Widget).AsWidget() - if w.StateIs(states.Focused) || w.ContainsFocus() { - return w - } - } - tb.InFocusGrab = true - defer func() { tb.InFocusGrab = false }() - for fli := 0; fli < tb.NCols; fli++ { - w := lg.Child(ridx + idxOff + fli).(core.Widget).AsWidget() - if w.CanFocus() { - w.SetFocus() - return w - } - } - return nil -} - -////////////////////////////////////////////////////// -// Header layout - -func (tb *Table) SizeFinal() { - tb.ListBase.SizeFinal() - lg := tb.ListGrid - sh := tb.SliceHeader() - sh.ForWidgetChildren(func(i int, cw core.Widget, cwb *core.WidgetBase) bool { - sgb := core.AsWidget(lg.Child(i)) - gsz := &sgb.Geom.Size - if gsz.Actual.Total.X == 0 { - return tree.Continue - } - ksz := &cwb.Geom.Size - ksz.Actual.Total.X = gsz.Actual.Total.X - ksz.Actual.Content.X = gsz.Actual.Content.X - ksz.Alloc.Total.X = gsz.Alloc.Total.X - ksz.Alloc.Content.X = gsz.Alloc.Content.X - return tree.Continue - }) - gsz := &lg.Geom.Size - ksz := &sh.Geom.Size - if gsz.Actual.Total.X > 0 { - ksz.Actual.Total.X = gsz.Actual.Total.X - ksz.Actual.Content.X = gsz.Actual.Content.X - ksz.Alloc.Total.X = gsz.Alloc.Total.X - ksz.Alloc.Content.X = gsz.Alloc.Content.X - } -} - -// SelectedColumnStrings returns the string values of given column name. -func (tb *Table) SelectedColumnStrings(colName string) []string { - dt := tb.Table.Table - jis := tb.SelectedIndexesList(false) - if len(jis) == 0 || dt == nil { - return nil - } - var s []string - for _, i := range jis { - v := dt.StringValue(colName, i) - s = append(s, v) - } - return s -} - -////////////////////////////////////////////////////////////////////////////// -// Copy / Cut / Paste - -func (tb *Table) MakeToolbar(p *tree.Plan) { - if tb.Table == nil || tb.Table.Table == nil { - return - } - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(tb.Table.AddRows).SetIcon(icons.Add) - w.SetAfterFunc(func() { tb.Update() }) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(tb.Table.SortColumnName).SetText("Sort").SetIcon(icons.Sort) - w.SetAfterFunc(func() { tb.Update() }) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(tb.Table.FilterColumnName).SetText("Filter").SetIcon(icons.FilterAlt) - w.SetAfterFunc(func() { tb.Update() }) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(tb.Table.Sequential).SetText("Unfilter").SetIcon(icons.FilterAltOff) - w.SetAfterFunc(func() { tb.Update() }) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(tb.Table.OpenCSV).SetIcon(icons.Open) - w.SetAfterFunc(func() { tb.Update() }) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(tb.Table.SaveCSV).SetIcon(icons.Save) - w.SetAfterFunc(func() { tb.Update() }) - }) -} - -func (tb *Table) MimeDataType() string { - return fileinfo.DataCsv -} - -// CopySelectToMime copies selected rows to mime data -func (tb *Table) CopySelectToMime() mimedata.Mimes { - nitms := len(tb.SelectedIndexes) - if nitms == 0 { - return nil - } - ix := &table.IndexView{} - ix.Table = tb.Table.Table - idx := tb.SelectedIndexesList(false) // ascending - iidx := make([]int, len(idx)) - for i, di := range idx { - iidx[i] = tb.Table.Indexes[di] - } - ix.Indexes = iidx - var b bytes.Buffer - ix.WriteCSV(&b, 
table.Tab, table.Headers) - md := mimedata.NewTextBytes(b.Bytes()) - md[0].Type = fileinfo.DataCsv - return md -} - -// FromMimeData returns records from csv of mime data -func (tb *Table) FromMimeData(md mimedata.Mimes) [][]string { - var recs [][]string - for _, d := range md { - if d.Type == fileinfo.DataCsv { - b := bytes.NewBuffer(d.Data) - cr := csv.NewReader(b) - cr.Comma = table.Tab.Rune() - rec, err := cr.ReadAll() - if err != nil || len(rec) == 0 { - log.Printf("Error reading CSV from clipboard: %s\n", err) - return nil - } - recs = append(recs, rec...) - } - } - return recs -} - -// PasteAssign assigns mime data (only the first one!) to this idx -func (tb *Table) PasteAssign(md mimedata.Mimes, idx int) { - recs := tb.FromMimeData(md) - if len(recs) == 0 { - return - } - tb.Table.Table.ReadCSVRow(recs[1], tb.Table.Indexes[idx]) - tb.UpdateChange() -} - -// PasteAtIndex inserts object(s) from mime data at (before) given slice index -// adds to end of table -func (tb *Table) PasteAtIndex(md mimedata.Mimes, idx int) { - recs := tb.FromMimeData(md) - nr := len(recs) - 1 - if nr <= 0 { - return - } - tb.Table.InsertRows(idx, nr) - for ri := 0; ri < nr; ri++ { - rec := recs[1+ri] - rw := tb.Table.Indexes[idx+ri] - tb.Table.Table.ReadCSVRow(rec, rw) - } - tb.SendChange() - tb.SelectIndexEvent(idx, events.SelectOne) - tb.Update() -} diff --git a/tensor/tensorcore/tensoreditor.go b/tensor/tensorcore/tensoreditor.go deleted file mode 100644 index 65a833cc91..0000000000 --- a/tensor/tensorcore/tensoreditor.go +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright (c) 2023, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tensorcore provides GUI Cogent Core widgets for tensor types. -package tensorcore - -import ( - "fmt" - "image" - "strconv" - - "cogentcore.org/core/base/fileinfo" - "cogentcore.org/core/base/fileinfo/mimedata" - "cogentcore.org/core/core" - "cogentcore.org/core/events" - "cogentcore.org/core/icons" - "cogentcore.org/core/styles" - "cogentcore.org/core/styles/states" - "cogentcore.org/core/styles/units" - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/table" - "cogentcore.org/core/tree" -) - -// TensorEditor provides a GUI widget for representing [tensor.Tensor] values. 
-type TensorEditor struct { - core.ListBase - - // the tensor that we're a view of - Tensor tensor.Tensor `set:"-"` - - // overall layout options for tensor display - Layout TensorLayout `set:"-"` - - // number of columns in table (as of last update) - NCols int `edit:"-"` - - // headerWidths has number of characters in each header, per visfields - headerWidths []int `copier:"-" display:"-" json:"-" xml:"-"` - - // colMaxWidths records maximum width in chars of string type fields - colMaxWidths []int `set:"-" copier:"-" json:"-" xml:"-"` - - // blank values for out-of-range rows - BlankString string - BlankFloat float64 -} - -// check for interface impl -var _ core.Lister = (*TensorEditor)(nil) - -func (tb *TensorEditor) Init() { - tb.ListBase.Init() - tb.Makers.Normal[0] = func(p *tree.Plan) { // TODO: reduce redundancy with ListBase Maker - svi := tb.This.(core.Lister) - svi.UpdateSliceSize() - - scrollTo := -1 - if tb.InitSelectedIndex >= 0 { - tb.SelectedIndex = tb.InitSelectedIndex - tb.InitSelectedIndex = -1 - scrollTo = tb.SelectedIndex - } - if scrollTo >= 0 { - tb.ScrollToIndex(scrollTo) - } - - tb.UpdateStartIndex() - tb.UpdateMaxWidths() - - tb.Updater(func() { - tb.UpdateStartIndex() - }) - - tb.MakeHeader(p) - tb.MakeGrid(p, func(p *tree.Plan) { - for i := 0; i < tb.VisibleRows; i++ { - svi.MakeRow(p, i) - } - }) - } -} - -func (tb *TensorEditor) SliceIndex(i int) (si, vi int, invis bool) { - si = tb.StartIndex + i - vi = si - invis = si >= tb.SliceSize - if !tb.Layout.TopZero { - vi = (tb.SliceSize - 1) - si - } - return -} - -// StyleValue performs additional value widget styling -func (tb *TensorEditor) StyleValue(w core.Widget, s *styles.Style, row, col int) { - hw := float32(tb.headerWidths[col]) - if len(tb.colMaxWidths) > col { - hw = max(float32(tb.colMaxWidths[col]), hw) - } - hv := units.Ch(hw) - s.Min.X.Value = max(s.Min.X.Value, hv.Convert(s.Min.X.Unit, &s.UnitContext).Value) - s.SetTextWrap(false) -} - -// SetTensor sets the source tensor that we are viewing, -// and then configures the display. 
-func (tb *TensorEditor) SetTensor(et tensor.Tensor) *TensorEditor { - if et == nil { - return nil - } - - tb.Tensor = et - tb.This.(core.Lister).UpdateSliceSize() - tb.SetSliceBase() - tb.Update() - return tb -} - -func (tb *TensorEditor) UpdateSliceSize() int { - tb.SliceSize, tb.NCols, _, _ = tensor.Projection2DShape(tb.Tensor.Shape(), tb.Layout.OddRow) - return tb.SliceSize -} - -func (tb *TensorEditor) UpdateMaxWidths() { - if len(tb.headerWidths) != tb.NCols { - tb.headerWidths = make([]int, tb.NCols) - tb.colMaxWidths = make([]int, tb.NCols) - } - if tb.SliceSize == 0 { - return - } - _, isstr := tb.Tensor.(*tensor.String) - for fli := 0; fli < tb.NCols; fli++ { - tb.colMaxWidths[fli] = 0 - if !isstr { - continue - } - mxw := 0 - // for _, ixi := range tb.Tensor.Indexes { - // if ixi >= 0 { - // sval := stsr.Values[ixi] - // mxw = max(mxw, len(sval)) - // } - // } - tb.colMaxWidths[fli] = mxw - } -} - -func (tb *TensorEditor) MakeHeader(p *tree.Plan) { - tree.AddAt(p, "header", func(w *core.Frame) { - core.ToolbarStyles(w) - w.FinalStyler(func(s *styles.Style) { - s.Padding.Zero() - s.Grow.Set(0, 0) - s.Gap.Set(units.Em(0.5)) // matches grid default - }) - w.Maker(func(p *tree.Plan) { - if tb.ShowIndexes { - tree.AddAt(p, "_head-index", func(w *core.Text) { // TODO: is not working - w.SetType(core.TextBodyMedium) - w.Styler(func(s *styles.Style) { - s.Align.Self = styles.Center - }) - w.SetText("Index") - }) - } - for fli := 0; fli < tb.NCols; fli++ { - hdr := tb.ColumnHeader(fli) - tree.AddAt(p, "head-"+hdr, func(w *core.Button) { - w.SetType(core.ButtonAction) - w.Styler(func(s *styles.Style) { - s.Justify.Content = styles.Start - }) - w.Updater(func() { - hdr := tb.ColumnHeader(fli) - w.SetText(hdr).SetTooltip(hdr) - tb.headerWidths[fli] = len(hdr) - }) - }) - } - }) - }) -} - -func (tb *TensorEditor) ColumnHeader(col int) string { - _, cc := tensor.Projection2DCoords(tb.Tensor.Shape(), tb.Layout.OddRow, 0, col) - sitxt := "" - for i, ccc := range cc { - sitxt += fmt.Sprintf("%03d", ccc) - if i < len(cc)-1 { - sitxt += "," - } - } - return sitxt -} - -// SliceHeader returns the Frame header for slice grid -func (tb *TensorEditor) SliceHeader() *core.Frame { - return tb.Child(0).(*core.Frame) -} - -// RowWidgetNs returns number of widgets per row and offset for index label -func (tb *TensorEditor) RowWidgetNs() (nWidgPerRow, idxOff int) { - nWidgPerRow = 1 + tb.NCols - idxOff = 1 - if !tb.ShowIndexes { - nWidgPerRow -= 1 - idxOff = 0 - } - return -} - -func (tb *TensorEditor) MakeRow(p *tree.Plan, i int) { - svi := tb.This.(core.Lister) - si, _, invis := svi.SliceIndex(i) - itxt := strconv.Itoa(i) - - if tb.ShowIndexes { - tb.MakeGridIndex(p, i, si, itxt, invis) - } - - _, isstr := tb.Tensor.(*tensor.String) - for fli := 0; fli < tb.NCols; fli++ { - valnm := fmt.Sprintf("value-%v.%v", fli, itxt) - - fval := float64(0) - str := "" - tree.AddNew(p, valnm, func() core.Value { - if isstr { - return core.NewValue(&str, "") - } else { - return core.NewValue(&fval, "") - } - }, func(w core.Value) { - wb := w.AsWidget() - tb.MakeValue(w, i) - w.AsTree().SetProperty(core.ListColProperty, fli) - if !tb.IsReadOnly() { - wb.OnChange(func(e events.Event) { - _, vi, invis := svi.SliceIndex(i) - if !invis { - if isstr { - tensor.Projection2DSetString(tb.Tensor, tb.Layout.OddRow, vi, fli, str) - } else { - tensor.Projection2DSet(tb.Tensor, tb.Layout.OddRow, vi, fli, fval) - } - } - tb.This.(core.Lister).UpdateMaxWidths() - tb.SendChange() - }) - } - wb.Updater(func() { - _, vi, invis := 
svi.SliceIndex(i) - if !invis { - if isstr { - str = tensor.Projection2DString(tb.Tensor, tb.Layout.OddRow, vi, fli) - core.Bind(&str, w) - } else { - fval = tensor.Projection2DValue(tb.Tensor, tb.Layout.OddRow, vi, fli) - core.Bind(&fval, w) - } - } else { - if isstr { - core.Bind(tb.BlankString, w) - } else { - core.Bind(tb.BlankFloat, w) - } - } - wb.SetReadOnly(tb.IsReadOnly()) - wb.SetState(invis, states.Invisible) - if svi.HasStyler() { - w.Style() - } - if invis { - wb.SetSelected(false) - } - }) - }) - } -} - -func (tb *TensorEditor) HasStyler() bool { return false } - -func (tb *TensorEditor) StyleRow(w core.Widget, idx, fidx int) {} - -// RowFirstVisWidget returns the first visible widget for given row (could be -// index or not) -- false if out of range -func (tb *TensorEditor) RowFirstVisWidget(row int) (*core.WidgetBase, bool) { - if !tb.IsRowInBounds(row) { - return nil, false - } - nWidgPerRow, idxOff := tb.RowWidgetNs() - lg := tb.ListGrid - w := lg.Children[row*nWidgPerRow].(core.Widget).AsWidget() - if w.Geom.TotalBBox != (image.Rectangle{}) { - return w, true - } - ridx := nWidgPerRow * row - for fli := 0; fli < tb.NCols; fli++ { - w := lg.Child(ridx + idxOff + fli).(core.Widget).AsWidget() - if w.Geom.TotalBBox != (image.Rectangle{}) { - return w, true - } - } - return nil, false -} - -// RowGrabFocus grabs the focus for the first focusable widget in given row -- -// returns that element or nil if not successful -- note: grid must have -// already rendered for focus to be grabbed! -func (tb *TensorEditor) RowGrabFocus(row int) *core.WidgetBase { - if !tb.IsRowInBounds(row) || tb.InFocusGrab { // range check - return nil - } - nWidgPerRow, idxOff := tb.RowWidgetNs() - ridx := nWidgPerRow * row - lg := tb.ListGrid - // first check if we already have focus - for fli := 0; fli < tb.NCols; fli++ { - w := lg.Child(ridx + idxOff + fli).(core.Widget).AsWidget() - if w.StateIs(states.Focused) || w.ContainsFocus() { - return w - } - } - tb.InFocusGrab = true - defer func() { tb.InFocusGrab = false }() - for fli := 0; fli < tb.NCols; fli++ { - w := lg.Child(ridx + idxOff + fli).(core.Widget).AsWidget() - if w.CanFocus() { - w.SetFocus() - return w - } - } - return nil -} - -////////////////////////////////////////////////////// -// Header layout - -func (tb *TensorEditor) SizeFinal() { - tb.ListBase.SizeFinal() - lg := tb.ListGrid - sh := tb.SliceHeader() - sh.ForWidgetChildren(func(i int, cw core.Widget, cwb *core.WidgetBase) bool { - sgb := core.AsWidget(lg.Child(i)) - gsz := &sgb.Geom.Size - if gsz.Actual.Total.X == 0 { - return tree.Continue - } - ksz := &cwb.Geom.Size - ksz.Actual.Total.X = gsz.Actual.Total.X - ksz.Actual.Content.X = gsz.Actual.Content.X - ksz.Alloc.Total.X = gsz.Alloc.Total.X - ksz.Alloc.Content.X = gsz.Alloc.Content.X - return tree.Continue - }) - gsz := &lg.Geom.Size - ksz := &sh.Geom.Size - if gsz.Actual.Total.X > 0 { - ksz.Actual.Total.X = gsz.Actual.Total.X - ksz.Actual.Content.X = gsz.Actual.Content.X - ksz.Alloc.Total.X = gsz.Alloc.Total.X - ksz.Alloc.Content.X = gsz.Alloc.Content.X - } -} - -////////////////////////////////////////////////////////////////////////////// -// Copy / Cut / Paste - -// SaveTSV writes a tensor to a tab-separated-values (TSV) file. -// Outer-most dims are rows in the file, and inner-most is column -- -// Reading just grabs all values and doesn't care about shape. 
-func (tb *TensorEditor) SaveCSV(filename core.Filename) error { //types:add - return tensor.SaveCSV(tb.Tensor, filename, table.Tab.Rune()) -} - -// OpenTSV reads a tensor from a tab-separated-values (TSV) file. -// using the Go standard encoding/csv reader conforming -// to the official CSV standard. -// Reads all values and assigns as many as fit. -func (tb *TensorEditor) OpenCSV(filename core.Filename) error { //types:add - return tensor.OpenCSV(tb.Tensor, filename, table.Tab.Rune()) -} - -func (tb *TensorEditor) MakeToolbar(p *tree.Plan) { - if tb.Tensor == nil { - return - } - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(tb.OpenCSV).SetIcon(icons.Open) - w.SetAfterFunc(func() { tb.Update() }) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(tb.SaveCSV).SetIcon(icons.Save) - w.SetAfterFunc(func() { tb.Update() }) - }) -} - -func (tb *TensorEditor) MimeDataType() string { - return fileinfo.DataCsv -} - -// CopySelectToMime copies selected rows to mime data -func (tb *TensorEditor) CopySelectToMime() mimedata.Mimes { - nitms := len(tb.SelectedIndexes) - if nitms == 0 { - return nil - } - // idx := tb.SelectedIndexesList(false) // ascending - // var b bytes.Buffer - // ix.WriteCSV(&b, table.Tab, table.Headers) - // md := mimedata.NewTextBytes(b.Bytes()) - // md[0].Type = fileinfo.DataCsv - // return md - return nil -} - -// FromMimeData returns records from csv of mime data -func (tb *TensorEditor) FromMimeData(md mimedata.Mimes) [][]string { - var recs [][]string - for _, d := range md { - if d.Type == fileinfo.DataCsv { - // b := bytes.NewBuffer(d.Data) - // cr := csv.NewReader(b) - // cr.Comma = table.Tab.Rune() - // rec, err := cr.ReadAll() - // if err != nil || len(rec) == 0 { - // log.Printf("Error reading CSV from clipboard: %s\n", err) - // return nil - // } - // recs = append(recs, rec...) - } - } - return recs -} - -// PasteAssign assigns mime data (only the first one!) to this idx -func (tb *TensorEditor) PasteAssign(md mimedata.Mimes, idx int) { - // recs := tb.FromMimeData(md) - // if len(recs) == 0 { - // return - // } - // tb.Tensor.ReadCSVRow(recs[1], tb.Tensor.Indexes[idx]) - // tb.UpdateChange() -} - -// PasteAtIndex inserts object(s) from mime data at (before) given slice index -// adds to end of table -func (tb *TensorEditor) PasteAtIndex(md mimedata.Mimes, idx int) { - // recs := tb.FromMimeData(md) - // nr := len(recs) - 1 - // if nr <= 0 { - // return - // } - // tb.Tensor.InsertRows(idx, nr) - // for ri := 0; ri < nr; ri++ { - // rec := recs[1+ri] - // rw := tb.Tensor.Indexes[idx+ri] - // tb.Tensor.ReadCSVRow(rec, rw) - // } - // tb.SendChange() - // tb.SelectIndexEvent(idx, events.SelectOne) - // tb.Update() -} diff --git a/tensor/tensorcore/tensorgrid.go b/tensor/tensorcore/tensorgrid.go deleted file mode 100644 index a6d054a744..0000000000 --- a/tensor/tensorcore/tensorgrid.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright (c) 2019, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
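Editorial note for the tensoreditor.go removal above: a minimal sketch of how the deleted TensorEditor was typically opened in a dialog with its CSV toolbar. It mirrors the OpenTensorEditor pattern in the (also removed) tensorgrid.go and uses only calls visible in the deleted code; the helper name and its parameters are hypothetical.

package example

import (
	"cogentcore.org/core/core"
	"cogentcore.org/core/tensor"
	"cogentcore.org/core/tensor/tensorcore"
)

// editTensor opens an editable row/column view of tsr in a window dialog,
// with the OpenCSV / SaveCSV buttons contributed by MakeToolbar.
func editTensor(ctx core.Widget, tsr tensor.Tensor) {
	d := core.NewBody("Tensor Editor")
	tb := core.NewToolbar(d)
	te := tensorcore.NewTensorEditor(d).SetTensor(tsr)
	tb.Maker(te.MakeToolbar) // toolbar gets the Open/Save CSV (TSV) actions
	d.RunWindowDialog(ctx)
}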
- -package tensorcore - -import ( - "image/color" - "log" - "strconv" - - "cogentcore.org/core/colors" - "cogentcore.org/core/colors/colormap" - "cogentcore.org/core/core" - "cogentcore.org/core/events" - "cogentcore.org/core/icons" - "cogentcore.org/core/math32" - "cogentcore.org/core/math32/minmax" - "cogentcore.org/core/styles" - "cogentcore.org/core/styles/abilities" - "cogentcore.org/core/styles/units" - "cogentcore.org/core/tensor" -) - -// TensorLayout are layout options for displaying tensors -type TensorLayout struct { //types:add - - // even-numbered dimensions are displayed as Y*X rectangles. - // This determines along which dimension to display any remaining - // odd dimension: OddRow = true = organize vertically along row - // dimension, false = organize horizontally across column dimension. - OddRow bool - - // if true, then the Y=0 coordinate is displayed from the top-down; - // otherwise the Y=0 coordinate is displayed from the bottom up, - // which is typical for emergent network patterns. - TopZero bool - - // display the data as a bitmap image. if a 2D tensor, then it will - // be a greyscale image. if a 3D tensor with size of either the first - // or last dim = either 3 or 4, then it is a RGB(A) color image. - Image bool -} - -// TensorDisplay are options for displaying tensors -type TensorDisplay struct { //types:add - TensorLayout - - // range to plot - Range minmax.Range64 `display:"inline"` - - // if not using fixed range, this is the actual range of data - MinMax minmax.F64 `display:"inline"` - - // the name of the color map to use in translating values to colors - ColorMap core.ColorMapName - - // what proportion of grid square should be filled by color block -- 1 = all, .5 = half, etc - GridFill float32 `min:"0.1" max:"1" step:"0.1" default:"0.9,1"` - - // amount of extra space to add at dimension boundaries, as a proportion of total grid size - DimExtra float32 `min:"0" max:"1" step:"0.02" default:"0.1,0.3"` - - // minimum size for grid squares -- they will never be smaller than this - GridMinSize float32 - - // maximum size for grid squares -- they will never be larger than this - GridMaxSize float32 - - // total preferred display size along largest dimension. 
- // grid squares will be sized to fit within this size, - // subject to harder GridMin / Max size constraints - TotPrefSize float32 - - // font size in standard point units for labels (e.g., SimMat) - FontSize float32 - - // our gridview, for update method - GridView *TensorGrid `copier:"-" json:"-" xml:"-" display:"-"` -} - -// Defaults sets defaults for values that are at nonsensical initial values -func (td *TensorDisplay) Defaults() { - if td.ColorMap == "" { - td.ColorMap = "ColdHot" - } - if td.Range.Max == 0 && td.Range.Min == 0 { - td.Range.SetMin(-1) - td.Range.SetMax(1) - } - if td.GridMinSize == 0 { - td.GridMinSize = 2 - } - if td.GridMaxSize == 0 { - td.GridMaxSize = 16 - } - if td.TotPrefSize == 0 { - td.TotPrefSize = 100 - } - if td.GridFill == 0 { - td.GridFill = 0.9 - td.DimExtra = 0.3 - } - if td.FontSize == 0 { - td.FontSize = 24 - } -} - -// FromMeta sets display options from Tensor meta-data -func (td *TensorDisplay) FromMeta(tsr tensor.Tensor) { - if op, has := tsr.MetaData("top-zero"); has { - if op == "+" || op == "true" { - td.TopZero = true - } - } - if op, has := tsr.MetaData("odd-row"); has { - if op == "+" || op == "true" { - td.OddRow = true - } - } - if op, has := tsr.MetaData("image"); has { - if op == "+" || op == "true" { - td.Image = true - } - } - if op, has := tsr.MetaData("min"); has { - mv, _ := strconv.ParseFloat(op, 64) - td.Range.Min = mv - } - if op, has := tsr.MetaData("max"); has { - mv, _ := strconv.ParseFloat(op, 64) - td.Range.Max = mv - } - if op, has := tsr.MetaData("fix-min"); has { - if op == "+" || op == "true" { - td.Range.FixMin = true - } else { - td.Range.FixMin = false - } - } - if op, has := tsr.MetaData("fix-max"); has { - if op == "+" || op == "true" { - td.Range.FixMax = true - } else { - td.Range.FixMax = false - } - } - if op, has := tsr.MetaData("colormap"); has { - td.ColorMap = core.ColorMapName(op) - } - if op, has := tsr.MetaData("grid-fill"); has { - mv, _ := strconv.ParseFloat(op, 32) - td.GridFill = float32(mv) - } - if op, has := tsr.MetaData("grid-min"); has { - mv, _ := strconv.ParseFloat(op, 32) - td.GridMinSize = float32(mv) - } - if op, has := tsr.MetaData("grid-max"); has { - mv, _ := strconv.ParseFloat(op, 32) - td.GridMaxSize = float32(mv) - } - if op, has := tsr.MetaData("dim-extra"); has { - mv, _ := strconv.ParseFloat(op, 32) - td.DimExtra = float32(mv) - } - if op, has := tsr.MetaData("font-size"); has { - mv, _ := strconv.ParseFloat(op, 32) - td.FontSize = float32(mv) - } -} - -//////////////////////////////////////////////////////////////////////////// -// TensorGrid - -// TensorGrid is a widget that displays tensor values as a grid of colored squares. 
-type TensorGrid struct { - core.WidgetBase - - // the tensor that we view - Tensor tensor.Tensor `set:"-"` - - // display options - Display TensorDisplay - - // the actual colormap - ColorMap *colormap.Map -} - -func (tg *TensorGrid) WidgetValue() any { return &tg.Tensor } - -func (tg *TensorGrid) SetWidgetValue(value any) error { - tg.SetTensor(value.(tensor.Tensor)) - return nil -} - -func (tg *TensorGrid) Init() { - tg.WidgetBase.Init() - tg.Display.GridView = tg - tg.Display.Defaults() - tg.Styler(func(s *styles.Style) { - s.SetAbilities(true, abilities.DoubleClickable) - ms := tg.MinSize() - s.Min.Set(units.Dot(ms.X), units.Dot(ms.Y)) - s.Grow.Set(1, 1) - }) - - tg.OnDoubleClick(func(e events.Event) { - tg.OpenTensorEditor() - }) - tg.AddContextMenu(func(m *core.Scene) { - core.NewFuncButton(m).SetFunc(tg.OpenTensorEditor).SetIcon(icons.Edit) - core.NewFuncButton(m).SetFunc(tg.EditSettings).SetIcon(icons.Edit) - }) -} - -// SetTensor sets the tensor. Must call Update after this. -func (tg *TensorGrid) SetTensor(tsr tensor.Tensor) *TensorGrid { - if _, ok := tsr.(*tensor.String); ok { - log.Printf("TensorGrid: String tensors cannot be displayed using TensorGrid\n") - return tg - } - tg.Tensor = tsr - if tg.Tensor != nil { - tg.Display.FromMeta(tg.Tensor) - } - return tg -} - -// OpenTensorEditor pulls up a TensorEditor of our tensor -func (tg *TensorGrid) OpenTensorEditor() { //types:add - d := core.NewBody("Tensor Editor") - tb := core.NewToolbar(d) - te := NewTensorEditor(d).SetTensor(tg.Tensor) - te.OnChange(func(e events.Event) { - tg.NeedsRender() - }) - tb.Maker(te.MakeToolbar) - d.RunWindowDialog(tg) -} - -func (tg *TensorGrid) EditSettings() { //types:add - d := core.NewBody("Tensor Grid Display Options") - core.NewForm(d).SetStruct(&tg.Display). 
- OnChange(func(e events.Event) { - tg.NeedsRender() - }) - d.RunWindowDialog(tg) -} - -// MinSize returns minimum size based on tensor and display settings -func (tg *TensorGrid) MinSize() math32.Vector2 { - if tg.Tensor == nil || tg.Tensor.Len() == 0 { - return math32.Vector2{} - } - if tg.Display.Image { - return math32.Vec2(float32(tg.Tensor.DimSize(1)), float32(tg.Tensor.DimSize(0))) - } - rows, cols, rowEx, colEx := tensor.Projection2DShape(tg.Tensor.Shape(), tg.Display.OddRow) - frw := float32(rows) + float32(rowEx)*tg.Display.DimExtra // extra spacing - fcl := float32(cols) + float32(colEx)*tg.Display.DimExtra // extra spacing - mx := float32(max(frw, fcl)) - gsz := tg.Display.TotPrefSize / mx - gsz = max(gsz, tg.Display.GridMinSize) - gsz = min(gsz, tg.Display.GridMaxSize) - gsz = max(gsz, 2) - return math32.Vec2(gsz*float32(fcl), gsz*float32(frw)) -} - -// EnsureColorMap makes sure there is a valid color map that matches specified name -func (tg *TensorGrid) EnsureColorMap() { - if tg.ColorMap != nil && tg.ColorMap.Name != string(tg.Display.ColorMap) { - tg.ColorMap = nil - } - if tg.ColorMap == nil { - ok := false - tg.ColorMap, ok = colormap.AvailableMaps[string(tg.Display.ColorMap)] - if !ok { - tg.Display.ColorMap = "" - tg.Display.Defaults() - } - tg.ColorMap = colormap.AvailableMaps[string(tg.Display.ColorMap)] - } -} - -func (tg *TensorGrid) Color(val float64) (norm float64, clr color.Color) { - if tg.ColorMap.Indexed { - clr = tg.ColorMap.MapIndex(int(val)) - } else { - norm = tg.Display.Range.ClipNormValue(val) - clr = tg.ColorMap.Map(float32(norm)) - } - return -} - -func (tg *TensorGrid) UpdateRange() { - if !tg.Display.Range.FixMin || !tg.Display.Range.FixMax { - min, max, _, _ := tg.Tensor.Range() - if !tg.Display.Range.FixMin { - nmin := minmax.NiceRoundNumber(min, true) // true = below # - tg.Display.Range.Min = nmin - } - if !tg.Display.Range.FixMax { - nmax := minmax.NiceRoundNumber(max, false) // false = above # - tg.Display.Range.Max = nmax - } - } -} - -func (tg *TensorGrid) Render() { - if tg.Tensor == nil || tg.Tensor.Len() == 0 { - return - } - tg.EnsureColorMap() - tg.UpdateRange() - - pc := &tg.Scene.PaintContext - - pos := tg.Geom.Pos.Content - sz := tg.Geom.Size.Actual.Content - // sz.SetSubScalar(tg.Disp.BotRtSpace.Dots) - - pc.FillBox(pos, sz, tg.Styles.Background) - - tsr := tg.Tensor - - if tg.Display.Image { - ysz := tsr.DimSize(0) - xsz := tsr.DimSize(1) - nclr := 1 - outclr := false // outer dimension is color - if tsr.NumDims() == 3 { - if tsr.DimSize(0) == 3 || tsr.DimSize(0) == 4 { - outclr = true - ysz = tsr.DimSize(1) - xsz = tsr.DimSize(2) - nclr = tsr.DimSize(0) - } else { - nclr = tsr.DimSize(2) - } - } - tsz := math32.Vec2(float32(xsz), float32(ysz)) - gsz := sz.Div(tsz) - for y := 0; y < ysz; y++ { - for x := 0; x < xsz; x++ { - ey := y - if !tg.Display.TopZero { - ey = (ysz - 1) - y - } - switch { - case outclr: - var r, g, b, a float64 - a = 1 - r = tg.Display.Range.ClipNormValue(tsr.Float([]int{0, y, x})) - g = tg.Display.Range.ClipNormValue(tsr.Float([]int{1, y, x})) - b = tg.Display.Range.ClipNormValue(tsr.Float([]int{2, y, x})) - if nclr > 3 { - a = tg.Display.Range.ClipNormValue(tsr.Float([]int{3, y, x})) - } - cr := math32.Vec2(float32(x), float32(ey)) - pr := pos.Add(cr.Mul(gsz)) - pc.StrokeStyle.Color = colors.Uniform(colors.FromFloat64(r, g, b, a)) - pc.FillBox(pr, gsz, pc.StrokeStyle.Color) - case nclr > 1: - var r, g, b, a float64 - a = 1 - r = tg.Display.Range.ClipNormValue(tsr.Float([]int{y, x, 0})) - g = 
tg.Display.Range.ClipNormValue(tsr.Float([]int{y, x, 1})) - b = tg.Display.Range.ClipNormValue(tsr.Float([]int{y, x, 2})) - if nclr > 3 { - a = tg.Display.Range.ClipNormValue(tsr.Float([]int{y, x, 3})) - } - cr := math32.Vec2(float32(x), float32(ey)) - pr := pos.Add(cr.Mul(gsz)) - pc.StrokeStyle.Color = colors.Uniform(colors.FromFloat64(r, g, b, a)) - pc.FillBox(pr, gsz, pc.StrokeStyle.Color) - default: - val := tg.Display.Range.ClipNormValue(tsr.Float([]int{y, x})) - cr := math32.Vec2(float32(x), float32(ey)) - pr := pos.Add(cr.Mul(gsz)) - pc.StrokeStyle.Color = colors.Uniform(colors.FromFloat64(val, val, val, 1)) - pc.FillBox(pr, gsz, pc.StrokeStyle.Color) - } - } - } - return - } - rows, cols, rowEx, colEx := tensor.Projection2DShape(tsr.Shape(), tg.Display.OddRow) - frw := float32(rows) + float32(rowEx)*tg.Display.DimExtra // extra spacing - fcl := float32(cols) + float32(colEx)*tg.Display.DimExtra // extra spacing - rowsInner := rows - colsInner := cols - if rowEx > 0 { - rowsInner = rows / rowEx - } - if colEx > 0 { - colsInner = cols / colEx - } - tsz := math32.Vec2(fcl, frw) - gsz := sz.Div(tsz) - - ssz := gsz.MulScalar(tg.Display.GridFill) // smaller size with margin - for y := 0; y < rows; y++ { - yex := float32(int(y/rowsInner)) * tg.Display.DimExtra - for x := 0; x < cols; x++ { - xex := float32(int(x/colsInner)) * tg.Display.DimExtra - ey := y - if !tg.Display.TopZero { - ey = (rows - 1) - y - } - val := tensor.Projection2DValue(tsr, tg.Display.OddRow, ey, x) - cr := math32.Vec2(float32(x)+xex, float32(y)+yex) - pr := pos.Add(cr.Mul(gsz)) - _, clr := tg.Color(val) - pc.FillBox(pr, ssz, colors.Uniform(clr)) - } - } -} diff --git a/tensor/tensorcore/typegen.go b/tensor/tensorcore/typegen.go deleted file mode 100644 index bbea16ff82..0000000000 --- a/tensor/tensorcore/typegen.go +++ /dev/null @@ -1,109 +0,0 @@ -// Code generated by "core generate"; DO NOT EDIT. - -package tensorcore - -import ( - "cogentcore.org/core/colors/colormap" - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/stats/simat" - "cogentcore.org/core/tensor/table" - "cogentcore.org/core/tree" - "cogentcore.org/core/types" -) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/tensor/tensorcore.SimMatGrid", IDName: "sim-mat-grid", Doc: "SimMatGrid is a widget that displays a similarity / distance matrix\nwith tensor values as a grid of colored squares, and labels for rows and columns.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Embeds: []types.Field{{Name: "TensorGrid"}}, Fields: []types.Field{{Name: "SimMat", Doc: "the similarity / distance matrix"}, {Name: "rowMaxSz"}, {Name: "rowMinBlank"}, {Name: "rowNGps"}, {Name: "colMaxSz"}, {Name: "colMinBlank"}, {Name: "colNGps"}}}) - -// NewSimMatGrid returns a new [SimMatGrid] with the given optional parent: -// SimMatGrid is a widget that displays a similarity / distance matrix -// with tensor values as a grid of colored squares, and labels for rows and columns. -func NewSimMatGrid(parent ...tree.Node) *SimMatGrid { return tree.New[SimMatGrid](parent...) 
} - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/tensor/tensorcore.Table", IDName: "table", Doc: "Table provides a GUI widget for representing [table.Table] values.", Embeds: []types.Field{{Name: "ListBase"}}, Fields: []types.Field{{Name: "Table", Doc: "the idx view of the table that we're a view of"}, {Name: "TensorDisplay", Doc: "overall display options for tensor display"}, {Name: "ColumnTensorDisplay", Doc: "per column tensor display params"}, {Name: "ColumnTensorBlank", Doc: "per column blank tensor values"}, {Name: "NCols", Doc: "number of columns in table (as of last update)"}, {Name: "SortIndex", Doc: "current sort index"}, {Name: "SortDescending", Doc: "whether current sort order is descending"}, {Name: "headerWidths", Doc: "headerWidths has number of characters in each header, per visfields"}, {Name: "colMaxWidths", Doc: "colMaxWidths records maximum width in chars of string type fields"}, {Name: "BlankString", Doc: "\tblank values for out-of-range rows"}, {Name: "BlankFloat"}}}) - -// NewTable returns a new [Table] with the given optional parent: -// Table provides a GUI widget for representing [table.Table] values. -func NewTable(parent ...tree.Node) *Table { return tree.New[Table](parent...) } - -// SetNCols sets the [Table.NCols]: -// number of columns in table (as of last update) -func (t *Table) SetNCols(v int) *Table { t.NCols = v; return t } - -// SetSortIndex sets the [Table.SortIndex]: -// current sort index -func (t *Table) SetSortIndex(v int) *Table { t.SortIndex = v; return t } - -// SetSortDescending sets the [Table.SortDescending]: -// whether current sort order is descending -func (t *Table) SetSortDescending(v bool) *Table { t.SortDescending = v; return t } - -// SetBlankString sets the [Table.BlankString]: -// -// blank values for out-of-range rows -func (t *Table) SetBlankString(v string) *Table { t.BlankString = v; return t } - -// SetBlankFloat sets the [Table.BlankFloat] -func (t *Table) SetBlankFloat(v float64) *Table { t.BlankFloat = v; return t } - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/tensor/tensorcore.TensorEditor", IDName: "tensor-editor", Doc: "TensorEditor provides a GUI widget for representing [tensor.Tensor] values.", Methods: []types.Method{{Name: "SaveCSV", Doc: "SaveTSV writes a tensor to a tab-separated-values (TSV) file.\nOuter-most dims are rows in the file, and inner-most is column --\nReading just grabs all values and doesn't care about shape.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"filename"}, Returns: []string{"error"}}, {Name: "OpenCSV", Doc: "OpenTSV reads a tensor from a tab-separated-values (TSV) file.\nusing the Go standard encoding/csv reader conforming\nto the official CSV standard.\nReads all values and assigns as many as fit.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"filename"}, Returns: []string{"error"}}}, Embeds: []types.Field{{Name: "ListBase"}}, Fields: []types.Field{{Name: "Tensor", Doc: "the tensor that we're a view of"}, {Name: "Layout", Doc: "overall layout options for tensor display"}, {Name: "NCols", Doc: "number of columns in table (as of last update)"}, {Name: "headerWidths", Doc: "headerWidths has number of characters in each header, per visfields"}, {Name: "colMaxWidths", Doc: "colMaxWidths records maximum width in chars of string type fields"}, {Name: "BlankString", Doc: "\tblank values for out-of-range rows"}, {Name: "BlankFloat"}}}) - -// NewTensorEditor returns a new [TensorEditor] with 
the given optional parent: -// TensorEditor provides a GUI widget for representing [tensor.Tensor] values. -func NewTensorEditor(parent ...tree.Node) *TensorEditor { return tree.New[TensorEditor](parent...) } - -// SetNCols sets the [TensorEditor.NCols]: -// number of columns in table (as of last update) -func (t *TensorEditor) SetNCols(v int) *TensorEditor { t.NCols = v; return t } - -// SetBlankString sets the [TensorEditor.BlankString]: -// -// blank values for out-of-range rows -func (t *TensorEditor) SetBlankString(v string) *TensorEditor { t.BlankString = v; return t } - -// SetBlankFloat sets the [TensorEditor.BlankFloat] -func (t *TensorEditor) SetBlankFloat(v float64) *TensorEditor { t.BlankFloat = v; return t } - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/tensor/tensorcore.TensorLayout", IDName: "tensor-layout", Doc: "TensorLayout are layout options for displaying tensors", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "OddRow", Doc: "even-numbered dimensions are displayed as Y*X rectangles.\nThis determines along which dimension to display any remaining\nodd dimension: OddRow = true = organize vertically along row\ndimension, false = organize horizontally across column dimension."}, {Name: "TopZero", Doc: "if true, then the Y=0 coordinate is displayed from the top-down;\notherwise the Y=0 coordinate is displayed from the bottom up,\nwhich is typical for emergent network patterns."}, {Name: "Image", Doc: "display the data as a bitmap image. if a 2D tensor, then it will\nbe a greyscale image. if a 3D tensor with size of either the first\nor last dim = either 3 or 4, then it is a RGB(A) color image."}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/tensor/tensorcore.TensorDisplay", IDName: "tensor-display", Doc: "TensorDisplay are options for displaying tensors", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Embeds: []types.Field{{Name: "TensorLayout"}}, Fields: []types.Field{{Name: "Range", Doc: "range to plot"}, {Name: "MinMax", Doc: "if not using fixed range, this is the actual range of data"}, {Name: "ColorMap", Doc: "the name of the color map to use in translating values to colors"}, {Name: "GridFill", Doc: "what proportion of grid square should be filled by color block -- 1 = all, .5 = half, etc"}, {Name: "DimExtra", Doc: "amount of extra space to add at dimension boundaries, as a proportion of total grid size"}, {Name: "GridMinSize", Doc: "minimum size for grid squares -- they will never be smaller than this"}, {Name: "GridMaxSize", Doc: "maximum size for grid squares -- they will never be larger than this"}, {Name: "TotPrefSize", Doc: "total preferred display size along largest dimension.\ngrid squares will be sized to fit within this size,\nsubject to harder GridMin / Max size constraints"}, {Name: "FontSize", Doc: "font size in standard point units for labels (e.g., SimMat)"}, {Name: "GridView", Doc: "our gridview, for update method"}}}) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/tensor/tensorcore.TensorGrid", IDName: "tensor-grid", Doc: "TensorGrid is a widget that displays tensor values as a grid of colored squares.", Methods: []types.Method{{Name: "OpenTensorEditor", Doc: "OpenTensorEditor pulls up a TensorEditor of our tensor", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "EditSettings", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}}, Embeds: []types.Field{{Name: "WidgetBase"}}, Fields: 
[]types.Field{{Name: "Tensor", Doc: "the tensor that we view"}, {Name: "Display", Doc: "display options"}, {Name: "ColorMap", Doc: "the actual colormap"}}}) - -// NewTensorGrid returns a new [TensorGrid] with the given optional parent: -// TensorGrid is a widget that displays tensor values as a grid of colored squares. -func NewTensorGrid(parent ...tree.Node) *TensorGrid { return tree.New[TensorGrid](parent...) } - -// SetDisplay sets the [TensorGrid.Display]: -// display options -func (t *TensorGrid) SetDisplay(v TensorDisplay) *TensorGrid { t.Display = v; return t } - -// SetColorMap sets the [TensorGrid.ColorMap]: -// the actual colormap -func (t *TensorGrid) SetColorMap(v *colormap.Map) *TensorGrid { t.ColorMap = v; return t } - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/tensor/tensorcore.TensorButton", IDName: "tensor-button", Doc: "TensorButton represents a Tensor with a button for making a [TensorGrid]\nviewer for an [tensor.Tensor].", Embeds: []types.Field{{Name: "Button"}}, Fields: []types.Field{{Name: "Tensor"}}}) - -// NewTensorButton returns a new [TensorButton] with the given optional parent: -// TensorButton represents a Tensor with a button for making a [TensorGrid] -// viewer for an [tensor.Tensor]. -func NewTensorButton(parent ...tree.Node) *TensorButton { return tree.New[TensorButton](parent...) } - -// SetTensor sets the [TensorButton.Tensor] -func (t *TensorButton) SetTensor(v tensor.Tensor) *TensorButton { t.Tensor = v; return t } - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/tensor/tensorcore.TableButton", IDName: "table-button", Doc: "TableButton presents a button that pulls up the [Table] viewer for a [table.Table].", Embeds: []types.Field{{Name: "Button"}}, Fields: []types.Field{{Name: "Table"}}}) - -// NewTableButton returns a new [TableButton] with the given optional parent: -// TableButton presents a button that pulls up the [Table] viewer for a [table.Table]. -func NewTableButton(parent ...tree.Node) *TableButton { return tree.New[TableButton](parent...) } - -// SetTable sets the [TableButton.Table] -func (t *TableButton) SetTable(v *table.Table) *TableButton { t.Table = v; return t } - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/tensor/tensorcore.SimMatButton", IDName: "sim-mat-button", Doc: "SimMatValue presents a button that pulls up the [SimMatGrid] viewer for a [table.Table].", Embeds: []types.Field{{Name: "Button"}}, Fields: []types.Field{{Name: "SimMat"}}}) - -// NewSimMatButton returns a new [SimMatButton] with the given optional parent: -// SimMatValue presents a button that pulls up the [SimMatGrid] viewer for a [table.Table]. -func NewSimMatButton(parent ...tree.Node) *SimMatButton { return tree.New[SimMatButton](parent...) } - -// SetSimMat sets the [SimMatButton.SimMat] -func (t *SimMatButton) SetSimMat(v *simat.SimMat) *SimMatButton { t.SimMat = v; return t } diff --git a/tensor/tensorcore/values.go b/tensor/tensorcore/values.go deleted file mode 100644 index 26eee622cd..0000000000 --- a/tensor/tensorcore/values.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2019, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
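Editorial note: the typegen.go block above lists the generated constructors and setters for the removed widgets. For reference, here is a hedged sketch of configuring the deleted TensorGrid's display options directly, using only fields and methods that appear in the removed TensorLayout/TensorDisplay and TensorGrid definitions; the helper function and its parameters are illustrative.

package example

import (
	"cogentcore.org/core/tensor"
	"cogentcore.org/core/tensor/tensorcore"
	"cogentcore.org/core/tree"
)

// showGrid adds a TensorGrid under parent and adjusts its display options.
func showGrid(parent tree.Node, tsr tensor.Tensor) *tensorcore.TensorGrid {
	tg := tensorcore.NewTensorGrid(parent).SetTensor(tsr)
	tg.Display.ColorMap = "ColdHot" // default map name from TensorDisplay.Defaults
	tg.Display.TopZero = true       // draw the Y=0 row at the top rather than the bottom
	tg.Display.Range.FixMin = true  // fix the plotted range instead of auto-ranging
	tg.Display.Range.Min = -1
	tg.Display.Range.FixMax = true
	tg.Display.Range.Max = 1
	tg.Update() // SetTensor requires a subsequent Update, per its doc comment
	return tg
}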
- -package tensorcore - -import ( - "cogentcore.org/core/core" - "cogentcore.org/core/icons" - "cogentcore.org/core/tensor" - "cogentcore.org/core/tensor/stats/simat" - "cogentcore.org/core/tensor/table" -) - -func init() { - core.AddValueType[table.Table, TableButton]() - core.AddValueType[tensor.Float32, TensorButton]() - core.AddValueType[tensor.Float64, TensorButton]() - core.AddValueType[tensor.Int, TensorButton]() - core.AddValueType[tensor.Int32, TensorButton]() - core.AddValueType[tensor.Byte, TensorButton]() - core.AddValueType[tensor.String, TensorButton]() - core.AddValueType[tensor.Bits, TensorButton]() - core.AddValueType[simat.SimMat, SimMatButton]() -} - -// TensorButton represents a Tensor with a button for making a [TensorGrid] -// viewer for an [tensor.Tensor]. -type TensorButton struct { - core.Button - Tensor tensor.Tensor -} - -func (tb *TensorButton) WidgetValue() any { return &tb.Tensor } - -func (tb *TensorButton) Init() { - tb.Button.Init() - tb.SetType(core.ButtonTonal).SetIcon(icons.Edit) - tb.Updater(func() { - text := "None" - if tb.Tensor != nil { - text = "Tensor" - } - tb.SetText(text) - }) - core.InitValueButton(tb, true, func(d *core.Body) { - NewTensorGrid(d).SetTensor(tb.Tensor) - }) -} - -// TableButton presents a button that pulls up the [Table] viewer for a [table.Table]. -type TableButton struct { - core.Button - Table *table.Table -} - -func (tb *TableButton) WidgetValue() any { return &tb.Table } - -func (tb *TableButton) Init() { - tb.Button.Init() - tb.SetType(core.ButtonTonal).SetIcon(icons.Edit) - tb.Updater(func() { - text := "None" - if tb.Table != nil { - if nm, has := tb.Table.MetaData["name"]; has { - text = nm - } else { - text = "Table" - } - } - tb.SetText(text) - }) - core.InitValueButton(tb, true, func(d *core.Body) { - NewTable(d).SetTable(tb.Table) - }) -} - -// SimMatValue presents a button that pulls up the [SimMatGrid] viewer for a [table.Table]. -type SimMatButton struct { - core.Button - SimMat *simat.SimMat -} - -func (tb *SimMatButton) WidgetValue() any { return &tb.SimMat } - -func (tb *SimMatButton) Init() { - tb.Button.Init() - tb.SetType(core.ButtonTonal).SetIcon(icons.Edit) - tb.Updater(func() { - text := "None" - if tb.SimMat != nil && tb.SimMat.Mat != nil { - if nm, has := tb.SimMat.Mat.MetaData("name"); has { - text = nm - } else { - text = "SimMat" - } - } - tb.SetText(text) - }) - core.InitValueButton(tb, true, func(d *core.Body) { - NewSimMatGrid(d).SetSimMat(tb.SimMat) - }) -} diff --git a/tensor/tensormpi/README.md b/tensor/tensormpi/README.md deleted file mode 100644 index 40bc7ca026..0000000000 --- a/tensor/tensormpi/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# tensormpi: Message Passing Interface - -The `tensormpi` package has methods to support use of MPI with tensor and table data structures, using the [mpi](../../base/mpi) package for Go mpi wrappers. - -As documented in the [mpi](../../base/mpi) package, You must set the `mpi` or `mpich` build tag to actually have it build using the mpi library. The default is to build a dummy version that has 1 proc of rank 0 always, and nop versions of all the methods. - -The code here is not build tag conditionalized, but is harmless for the default 1 processor case. - -Supported functionality: - -* `GatherTensorRows`, `GatherTableRows`: Gathering `table.Table` and `tensor.Tensor` data across processors. - -* `RandCheck` checks that the current random number generated across different processors is the same, which is often needed. 
- -* `AllocN` allocates n items to process across mpi processors. - diff --git a/tensor/tensormpi/alloc.go b/tensor/tensormpi/alloc.go deleted file mode 100644 index 3c47d6ecd3..0000000000 --- a/tensor/tensormpi/alloc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tensormpi - -import ( - "fmt" - "log" - - "cogentcore.org/core/base/mpi" -) - -// Alloc allocates n items to current mpi proc based on WorldSize and WorldRank. -// Returns start and end (exclusive) range for current proc. -func AllocN(n int) (st, end int, err error) { - nproc := mpi.WorldSize() - if n%nproc != 0 { - err = fmt.Errorf("tensormpi.AllocN: number: %d is not an even multiple of number of MPI procs: %d -- must be!", n, nproc) - log.Println(err) - } - pt := n / nproc - st = pt * mpi.WorldRank() - end = st + pt - return -} diff --git a/tensor/tensormpi/doc.go b/tensor/tensormpi/doc.go deleted file mode 100644 index 02d078f335..0000000000 --- a/tensor/tensormpi/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tensormpi has methods to support use of MPI with tensor and table data structures. -package tensormpi diff --git a/tensor/tensormpi/randcheck.go b/tensor/tensormpi/randcheck.go deleted file mode 100644 index 21b8714367..0000000000 --- a/tensor/tensormpi/randcheck.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2021, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tensormpi - -import ( - "errors" - "fmt" - "math/rand" - - "cogentcore.org/core/base/mpi" -) - -// RandCheck checks that the current random numbers generated across each -// MPI processor are identical. -func RandCheck(comm *mpi.Comm) error { - ws := comm.Size() - rnd := rand.Int() - src := []int{rnd} - agg := make([]int, ws) - err := comm.AllGatherInt(agg, src) - if err != nil { - return err - } - errs := "" - for i := range agg { - if agg[i] != rnd { - errs += fmt.Sprintf("%d ", i) - } - } - if errs != "" { - err = errors.New("tensormpi.RandCheck: random numbers differ in procs: " + errs) - mpi.Printf("%s\n", err) - } - return err -} diff --git a/tensor/tensormpi/table.go b/tensor/tensormpi/table.go deleted file mode 100644 index da874b1a8a..0000000000 --- a/tensor/tensormpi/table.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tensormpi - -import ( - "cogentcore.org/core/base/mpi" - "cogentcore.org/core/tensor/table" -) - -// GatherTableRows does an MPI AllGather on given src table data, gathering into dest. -// dest will have np * src.Rows Rows, filled with each processor's data, in order. -// dest must be a clone of src: if not same number of cols, will be configured from src. 
-func GatherTableRows(dest, src *table.Table, comm *mpi.Comm) { - sr := src.Rows - np := mpi.WorldSize() - dr := np * sr - if len(dest.Columns) != len(src.Columns) { - *dest = *src.Clone() - } - dest.SetNumRows(dr) - for ci, st := range src.Columns { - dt := dest.Columns[ci] - GatherTensorRows(dt, st, comm) - } -} - -// ReduceTable does an MPI AllReduce on given src table data using given operation, -// gathering into dest. -// each processor must have the same table organization -- the tensor values are -// just aggregated directly across processors. -// dest will be a clone of src if not the same (cos & rows), -// does nothing for strings. -func ReduceTable(dest, src *table.Table, comm *mpi.Comm, op mpi.Op) { - sr := src.Rows - if len(dest.Columns) != len(src.Columns) { - *dest = *src.Clone() - } - dest.SetNumRows(sr) - for ci, st := range src.Columns { - dt := dest.Columns[ci] - ReduceTensor(dt, st, comm, op) - } -} diff --git a/tensor/tensormpi/tensor.go b/tensor/tensormpi/tensor.go deleted file mode 100644 index f25e42268c..0000000000 --- a/tensor/tensormpi/tensor.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2020, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tensormpi - -import ( - "reflect" - - "cogentcore.org/core/base/mpi" - "cogentcore.org/core/tensor" -) - -// GatherTensorRows does an MPI AllGather on given src tensor data, gathering into dest, -// using a row-based tensor organization (as in an table.Table). -// dest will have np * src.Rows Rows, filled with each processor's data, in order. -// dest must have same overall shape as src at start, but rows will be enforced. -func GatherTensorRows(dest, src tensor.Tensor, comm *mpi.Comm) error { - dt := src.DataType() - if dt == reflect.String { - return GatherTensorRowsString(dest.(*tensor.String), src.(*tensor.String), comm) - } - sr, _ := src.RowCellSize() - dr, _ := dest.RowCellSize() - np := mpi.WorldSize() - dl := np * sr - if dr != dl { - dest.SetNumRows(dl) - dr = dl - } - - var err error - switch dt { - case reflect.Bool: - // todo - case reflect.Uint8: - dt := dest.(*tensor.Byte) - st := src.(*tensor.Byte) - err = comm.AllGatherU8(dt.Values, st.Values) - case reflect.Int32: - dt := dest.(*tensor.Int32) - st := src.(*tensor.Int32) - err = comm.AllGatherI32(dt.Values, st.Values) - case reflect.Int: - dt := dest.(*tensor.Int) - st := src.(*tensor.Int) - err = comm.AllGatherInt(dt.Values, st.Values) - case reflect.Float32: - dt := dest.(*tensor.Float32) - st := src.(*tensor.Float32) - err = comm.AllGatherF32(dt.Values, st.Values) - case reflect.Float64: - dt := dest.(*tensor.Float64) - st := src.(*tensor.Float64) - err = comm.AllGatherF64(dt.Values, st.Values) - } - return err -} - -// GatherTensorRowsString does an MPI AllGather on given String src tensor data, -// gathering into dest, using a row-based tensor organization (as in an table.Table). -// dest will have np * src.Rows Rows, filled with each processor's data, in order. -// dest must have same overall shape as src at start, but rows will be enforced. 
-func GatherTensorRowsString(dest, src *tensor.String, comm *mpi.Comm) error { - sr, _ := src.RowCellSize() - dr, _ := dest.RowCellSize() - np := mpi.WorldSize() - dl := np * sr - if dr != dl { - dest.SetNumRows(dl) - dr = dl - } - ssz := len(src.Values) - dsz := len(dest.Values) - sln := make([]int, ssz) - dln := make([]int, dsz) - for i, s := range src.Values { - sln[i] = len(s) - } - err := comm.AllGatherInt(dln, sln) - if err != nil { - return err - } - mxlen := 0 - for _, l := range dln { - mxlen = max(mxlen, l) - } - if mxlen == 0 { - return nil // nothing to transfer - } - sdt := make([]byte, ssz*mxlen) - ddt := make([]byte, dsz*mxlen) - idx := 0 - for _, s := range src.Values { - l := len(s) - copy(sdt[idx:idx+l], []byte(s)) - idx += mxlen - } - err = comm.AllGatherU8(ddt, sdt) - idx = 0 - for i := range dest.Values { - l := dln[i] - s := string(ddt[idx : idx+l]) - dest.Values[i] = s - idx += mxlen - } - return err -} - -// ReduceTensor does an MPI AllReduce on given src tensor data, using given operation, -// gathering into dest. dest must have same overall shape as src -- will be enforced. -// IMPORTANT: src and dest must be different slices! -// each processor must have the same shape and organization for this to make sense. -// does nothing for strings. -func ReduceTensor(dest, src tensor.Tensor, comm *mpi.Comm, op mpi.Op) error { - dt := src.DataType() - if dt == reflect.String { - return nil - } - slen := src.Len() - if slen != dest.Len() { - dest.CopyShapeFrom(src) - } - var err error - switch dt { - case reflect.Bool: - dt := dest.(*tensor.Bits) - st := src.(*tensor.Bits) - err = comm.AllReduceU8(op, dt.Values, st.Values) - case reflect.Uint8: - dt := dest.(*tensor.Byte) - st := src.(*tensor.Byte) - err = comm.AllReduceU8(op, dt.Values, st.Values) - case reflect.Int32: - dt := dest.(*tensor.Int32) - st := src.(*tensor.Int32) - err = comm.AllReduceI32(op, dt.Values, st.Values) - case reflect.Int: - dt := dest.(*tensor.Int) - st := src.(*tensor.Int) - err = comm.AllReduceInt(op, dt.Values, st.Values) - case reflect.Float32: - dt := dest.(*tensor.Float32) - st := src.(*tensor.Float32) - err = comm.AllReduceF32(op, dt.Values, st.Values) - case reflect.Float64: - dt := dest.(*tensor.Float64) - st := src.(*tensor.Float64) - err = comm.AllReduceF64(op, dt.Values, st.Values) - } - return err -} diff --git a/texteditor/editor.go b/texteditor/editor.go index 46b026ebe7..7819bd436a 100644 --- a/texteditor/editor.go +++ b/texteditor/editor.go @@ -248,6 +248,9 @@ func (ed *Editor) Init() { s.MaxBorder.Width.Set(units.Dp(2)) s.Background = colors.Scheme.SurfaceContainerLow + if s.IsReadOnly() { + s.Background = colors.Scheme.SurfaceContainer + } // note: a blank background does NOT work for depth color rendering if s.Is(states.Focused) { s.StateLayer = 0 @@ -317,9 +320,6 @@ func (ed *Editor) resetState() { if ed.Buffer == nil || ed.lastFilename != ed.Buffer.Filename { // don't reset if reopening.. ed.CursorPos = lexer.Pos{} } - if ed.Buffer != nil { - ed.Buffer.SetReadOnly(ed.IsReadOnly()) - } } // SetBuffer sets the [Buffer] that this is an editor of, and interconnects their events. diff --git a/texteditor/events.go b/texteditor/events.go index 90240efbbd..fe25f05c85 100644 --- a/texteditor/events.go +++ b/texteditor/events.go @@ -729,5 +729,13 @@ func (ed *Editor) contextMenu(m *core.Scene) { OnClick(func(e events.Event) { ed.Clear() }) + if ed.Buffer != nil && ed.Buffer.Info.Generated { + core.NewButton(m).SetText("Set editable").SetIcon(icons.Edit). 
+ OnClick(func(e events.Event) { + ed.SetReadOnly(false) + ed.Buffer.Info.Generated = false + ed.Update() + }) + } } } diff --git a/texteditor/highlighting/defaults.highlighting b/texteditor/highlighting/defaults.highlighting index 3e42a6b9a9..cb7dd0aff8 100644 --- a/texteditor/highlighting/defaults.highlighting +++ b/texteditor/highlighting/defaults.highlighting @@ -6458,10 +6458,10 @@ "A": 0 }, "Background": { - "R": 225, - "G": 225, - "B": 225, - "A": 255 + "R": 0, + "G": 0, + "B": 0, + "A": 0 }, "Border": { "R": 0, diff --git a/texteditor/highlighting/style.go b/texteditor/highlighting/style.go index 634dc76e83..ec38c876ca 100644 --- a/texteditor/highlighting/style.go +++ b/texteditor/highlighting/style.go @@ -46,26 +46,34 @@ func (t Trilean) Prefix(s string) string { // StyleEntry is one value in the map of highlight style values type StyleEntry struct { - // text color + // Color is the text color. Color color.RGBA - // background color + // Background color. + // In general it is not good to use this because it obscures highlighting. Background color.RGBA - // border color? not sure what this is -- not really used + // Border color? not sure what this is -- not really used. Border color.RGBA `display:"-"` - // bold font + // Bold font. Bold Trilean - // italic font + // Italic font. Italic Trilean - // underline + // Underline. Underline Trilean - // don't inherit these settings from sub-category or category levels -- otherwise everything with a Pass is inherited + // NoInherit indicates to not inherit these settings from sub-category or category levels. + // Otherwise everything with a Pass is inherited. NoInherit bool + + // themeColor is the theme-adjusted text color. + themeColor color.RGBA + + // themeBackground is the theme-adjusted background color. + themeBackground color.RGBA } // // FromChroma copies styles from chroma @@ -108,7 +116,7 @@ func (se *StyleEntry) UpdateFromTheme() { if matcolor.SchemeIsDark { ctone = 80 } - se.Color = hc.WithChroma(max(hc.Chroma, 48)).WithTone(ctone).AsRGBA() + se.themeColor = hc.WithChroma(max(hc.Chroma, 48)).WithTone(ctone).AsRGBA() if !colors.IsNil(se.Background) { hb := hct.FromColor(se.Background) @@ -116,7 +124,7 @@ func (se *StyleEntry) UpdateFromTheme() { if matcolor.SchemeIsDark { btone = min(hb.Tone, 17) } - se.Background = hb.WithChroma(max(hb.Chroma, 6)).WithTone(btone).AsRGBA() + se.themeBackground = hb.WithChroma(max(hb.Chroma, 6)).WithTone(btone).AsRGBA() } } @@ -134,11 +142,11 @@ func (se StyleEntry) String() string { if se.NoInherit { out = append(out, "noinherit") } - if !colors.IsNil(se.Color) { - out = append(out, colors.AsString(se.Color)) + if !colors.IsNil(se.themeColor) { + out = append(out, colors.AsString(se.themeColor)) } - if !colors.IsNil(se.Background) { - out = append(out, "bg:"+colors.AsString(se.Background)) + if !colors.IsNil(se.themeBackground) { + out = append(out, "bg:"+colors.AsString(se.themeBackground)) } if !colors.IsNil(se.Border) { out = append(out, "border:"+colors.AsString(se.Border)) @@ -149,11 +157,11 @@ func (se StyleEntry) String() string { // ToCSS converts StyleEntry to CSS attributes. 
func (se StyleEntry) ToCSS() string { styles := []string{} - if !colors.IsNil(se.Color) { - styles = append(styles, "color: "+colors.AsString(se.Color)) + if !colors.IsNil(se.themeColor) { + styles = append(styles, "color: "+colors.AsString(se.themeColor)) } - if !colors.IsNil(se.Background) { - styles = append(styles, "background-color: "+colors.AsString(se.Background)) + if !colors.IsNil(se.themeBackground) { + styles = append(styles, "background-color: "+colors.AsString(se.themeBackground)) } if se.Bold == Yes { styles = append(styles, "font-weight: bold") @@ -170,11 +178,11 @@ func (se StyleEntry) ToCSS() string { // ToProperties converts the StyleEntry to key-value properties. func (se StyleEntry) ToProperties() map[string]any { pr := map[string]any{} - if !colors.IsNil(se.Color) { - pr["color"] = se.Color + if !colors.IsNil(se.themeColor) { + pr["color"] = se.themeColor } - if !colors.IsNil(se.Background) { - pr["background-color"] = se.Background + if !colors.IsNil(se.themeBackground) { + pr["background-color"] = se.themeBackground } if se.Bold == Yes { pr["font-weight"] = styles.WeightBold @@ -189,25 +197,27 @@ func (se StyleEntry) ToProperties() map[string]any { } // Sub subtracts two style entries, returning an entry with only the differences set -func (s StyleEntry) Sub(e StyleEntry) StyleEntry { +func (se StyleEntry) Sub(e StyleEntry) StyleEntry { out := StyleEntry{} - if e.Color != s.Color { - out.Color = s.Color + if e.Color != se.Color { + out.Color = se.Color + out.themeColor = se.themeColor } - if e.Background != s.Background { - out.Background = s.Background + if e.Background != se.Background { + out.Background = se.Background + out.themeBackground = se.themeBackground } - if e.Border != s.Border { - out.Border = s.Border + if e.Border != se.Border { + out.Border = se.Border } - if e.Bold != s.Bold { - out.Bold = s.Bold + if e.Bold != se.Bold { + out.Bold = se.Bold } - if e.Italic != s.Italic { - out.Italic = s.Italic + if e.Italic != se.Italic { + out.Italic = se.Italic } - if e.Underline != s.Underline { - out.Underline = s.Underline + if e.Underline != se.Underline { + out.Underline = se.Underline } return out } @@ -215,18 +225,20 @@ func (s StyleEntry) Sub(e StyleEntry) StyleEntry { // Inherit styles from ancestors. // // Ancestors should be provided from oldest, furthest away to newest, closest. 
-func (s StyleEntry) Inherit(ancestors ...StyleEntry) StyleEntry { - out := s +func (se StyleEntry) Inherit(ancestors ...StyleEntry) StyleEntry { + out := se for i := len(ancestors) - 1; i >= 0; i-- { if out.NoInherit { return out } ancestor := ancestors[i] - if colors.IsNil(out.Color) { + if colors.IsNil(out.themeColor) { out.Color = ancestor.Color + out.themeColor = ancestor.themeColor } - if colors.IsNil(out.Background) { + if colors.IsNil(out.themeBackground) { out.Background = ancestor.Background + out.themeBackground = ancestor.themeBackground } if colors.IsNil(out.Border) { out.Border = ancestor.Border @@ -244,9 +256,9 @@ func (s StyleEntry) Inherit(ancestors ...StyleEntry) StyleEntry { return out } -func (s StyleEntry) IsZero() bool { - return colors.IsNil(s.Color) && colors.IsNil(s.Background) && colors.IsNil(s.Border) && s.Bold == Pass && s.Italic == Pass && - s.Underline == Pass && !s.NoInherit +func (se StyleEntry) IsZero() bool { + return colors.IsNil(se.Color) && colors.IsNil(se.Background) && colors.IsNil(se.Border) && se.Bold == Pass && se.Italic == Pass && + se.Underline == Pass && !se.NoInherit } /////////////////////////////////////////////////////////////////////////////////// diff --git a/texteditor/highlighting/typegen.go b/texteditor/highlighting/typegen.go index 2566f3a9a0..ac1180c6c2 100644 --- a/texteditor/highlighting/typegen.go +++ b/texteditor/highlighting/typegen.go @@ -10,7 +10,7 @@ var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/texteditor/highligh var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/texteditor/highlighting.Trilean", IDName: "trilean", Doc: "Trilean value for StyleEntry value inheritance."}) -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/texteditor/highlighting.StyleEntry", IDName: "style-entry", Doc: "StyleEntry is one value in the map of highlight style values", Fields: []types.Field{{Name: "Color", Doc: "text color"}, {Name: "Background", Doc: "background color"}, {Name: "Border", Doc: "border color? not sure what this is -- not really used"}, {Name: "Bold", Doc: "bold font"}, {Name: "Italic", Doc: "italic font"}, {Name: "Underline", Doc: "underline"}, {Name: "NoInherit", Doc: "don't inherit these settings from sub-category or category levels -- otherwise everything with a Pass is inherited"}}}) +var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/texteditor/highlighting.StyleEntry", IDName: "style-entry", Doc: "StyleEntry is one value in the map of highlight style values", Fields: []types.Field{{Name: "Color", Doc: "Color is the text color."}, {Name: "Background", Doc: "Background color.\nIn general it is not good to use this because it obscures highlighting."}, {Name: "Border", Doc: "Border color? 
not sure what this is -- not really used."}, {Name: "Bold", Doc: "Bold font."}, {Name: "Italic", Doc: "Italic font."}, {Name: "Underline", Doc: "Underline."}, {Name: "NoInherit", Doc: "NoInherit indicates to not inherit these settings from sub-category or category levels.\nOtherwise everything with a Pass is inherited."}, {Name: "themeColor", Doc: "themeColor is the theme-adjusted text color."}, {Name: "themeBackground", Doc: "themeBackground is the theme-adjusted background color."}}}) var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/texteditor/highlighting.Style", IDName: "style", Doc: "Style is a full style map of styles for different token.Tokens tag values"}) diff --git a/texteditor/typegen.go b/texteditor/typegen.go index 22d09b6999..d603903f29 100644 --- a/texteditor/typegen.go +++ b/texteditor/typegen.go @@ -15,7 +15,7 @@ import ( var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/texteditor.Buffer", IDName: "buffer", Doc: "Buffer is a buffer of text, which can be viewed by [Editor](s).\nIt holds the raw text lines (in original string and rune formats,\nand marked-up from syntax highlighting), and sends signals for making\nedits to the text and coordinating those edits across multiple views.\nEditors always only view a single buffer, so they directly call methods\non the buffer to drive updates, which are then broadcast.\nIt also has methods for loading and saving buffers to files.\nUnlike GUI widgets, its methods generally send events, without an\nexplicit Event suffix.\nInternally, the buffer represents new lines using \\n = LF, but saving\nand loading can deal with Windows/DOS CRLF format.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Methods: []types.Method{{Name: "Open", Doc: "Open loads the given file into the buffer.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"filename"}, Returns: []string{"error"}}, {Name: "Revert", Doc: "Revert re-opens text from the current file,\nif the filename is set; returns false if not.\nIt uses an optimized diff-based update to preserve\nexisting formatting, making it very fast if not very different.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Returns: []string{"bool"}}, {Name: "SaveAs", Doc: "SaveAs saves the current text into given file; does an editDone first to save edits\nand checks for an existing file; if it does exist then prompts to overwrite or not.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"filename"}}, {Name: "Save", Doc: "Save saves the current text into the current filename associated with this buffer.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Returns: []string{"error"}}}, Embeds: []types.Field{{Name: "Lines"}}, Fields: []types.Field{{Name: "Filename", Doc: "Filename is the filename of the file that was last loaded or saved.\nIt is used when highlighting code."}, {Name: "Autosave", Doc: "Autosave specifies whether the file should be automatically\nsaved after changes are made."}, {Name: "Info", Doc: "Info is the full information about the current file."}, {Name: "LineColors", Doc: "LineColors are the colors to use for rendering circles\nnext to the line numbers of certain lines."}, {Name: "editors", Doc: "editors are the editors that are currently viewing this buffer."}, {Name: "posHistory", Doc: "posHistory is the history of cursor positions.\nIt can be used to move back through them."}, {Name: "Complete", Doc: "Complete is the functions and data for text 
completion."}, {Name: "spell", Doc: "spell is the functions and data for spelling correction."}, {Name: "currentEditor", Doc: "currentEditor is the current text editor, such as the one that initiated the\nComplete or Correct process. The cursor position in this view is updated, and\nit is reset to nil after usage."}, {Name: "listeners", Doc: "listeners is used for sending standard system events.\nChange is sent for BufferDone, BufferInsert, and BufferDelete."}, {Name: "autoSaving", Doc: "autoSaving is used in atomically safe way to protect autosaving"}, {Name: "notSaved", Doc: "notSaved indicates if the text has been changed (edited) relative to the\noriginal, since last Save. This can be true even when changed flag is\nfalse, because changed is cleared on EditDone, e.g., when texteditor\nis being monitored for OnChange and user does Control+Enter.\nUse IsNotSaved() method to query state."}, {Name: "fileModOK", Doc: "fileModOK have already asked about fact that file has changed since being\nopened, user is ok"}}}) -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/texteditor.DiffEditor", IDName: "diff-editor", Doc: "DiffEditor presents two side-by-side [Editor]s showing the differences\nbetween two files (represented as lines of strings).", Methods: []types.Method{{Name: "saveFileA", Doc: "saveFileA saves the current state of file A to given filename", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"fname"}}, {Name: "saveFileB", Doc: "saveFileB saves the current state of file B to given filename", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"fname"}}}, Embeds: []types.Field{{Name: "Frame"}}, Fields: []types.Field{{Name: "FileA", Doc: "first file name being compared"}, {Name: "FileB", Doc: "second file name being compared"}, {Name: "RevisionA", Doc: "revision for first file, if relevant"}, {Name: "RevisionB", Doc: "revision for second file, if relevant"}, {Name: "bufferA", Doc: "[Buffer] for A showing the aligned edit view"}, {Name: "bufferB", Doc: "[Buffer] for B showing the aligned edit view"}, {Name: "alignD", Doc: "aligned diffs records diff for aligned lines"}, {Name: "diffs", Doc: "diffs applied"}, {Name: "inInputEvent"}}}) +var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/texteditor.DiffEditor", IDName: "diff-editor", Doc: "DiffEditor presents two side-by-side [Editor]s showing the differences\nbetween two files (represented as lines of strings).", Methods: []types.Method{{Name: "saveFileA", Doc: "saveFileA saves the current state of file A to given filename", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"fname"}}, {Name: "saveFileB", Doc: "saveFileB saves the current state of file B to given filename", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"fname"}}}, Embeds: []types.Field{{Name: "Frame"}}, Fields: []types.Field{{Name: "FileA", Doc: "first file name being compared"}, {Name: "FileB", Doc: "second file name being compared"}, {Name: "RevisionA", Doc: "revision for first file, if relevant"}, {Name: "RevisionB", Doc: "revision for second file, if relevant"}, {Name: "bufferA", Doc: "[Buffer] for A showing the aligned edit view"}, {Name: "bufferB", Doc: "[Buffer] for B showing the aligned edit view"}, {Name: "alignD", Doc: "aligned diffs records diff for aligned lines"}, {Name: "diffs", Doc: "diffs applied"}, {Name: "inInputEvent"}, {Name: "toolbar"}}}) // NewDiffEditor returns a new [DiffEditor] with the given optional 
parent: // DiffEditor presents two side-by-side [Editor]s showing the differences diff --git a/types/typegen/generate.go b/types/typegen/generate.go new file mode 100644 index 0000000000..4ac44fabb8 --- /dev/null +++ b/types/typegen/generate.go @@ -0,0 +1,81 @@ +// Copyright (c) 2023, Cogent Core. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typegen provides the generation of type information for +// Go types, methods, and functions. +package typegen + +//go:generate core generate + +import ( + "fmt" + + "cogentcore.org/core/base/generate" + "golang.org/x/tools/go/packages" +) + +// ParsePackages parses the package(s) located in the configuration source directory. +func ParsePackages(cfg *Config) ([]*packages.Package, error) { + pcfg := &packages.Config{ + Mode: PackageModes(cfg), + // TODO: Need to think about types and functions in test files. Maybe write typegen_test.go + // in a separate pass? For later. + Tests: false, + } + pkgs, err := generate.Load(pcfg, cfg.Dir) + if err != nil { + return nil, fmt.Errorf("typegen: Generate: error parsing package: %w", err) + } + return pkgs, err +} + +// Generate generates typegen type info, using the +// configuration information, loading the packages from the +// configuration source directory, and writing the result +// to the configuration output file. +// +// It is a simple entry point to typegen that does all +// of the steps; for more specific functionality, create +// a new [Generator] with [NewGenerator] and call methods on it. + +//cli:cmd -root +func Generate(cfg *Config) error { //types:add + pkgs, err := ParsePackages(cfg) + if err != nil { + return err + } + return GeneratePkgs(cfg, pkgs) +} + +// GeneratePkgs generates typegen type info using +// the given configuration object and packages parsed +// from the configuration source directory, +// and writes the result to the config output file. +// It is a simple entry point to typegen that does all +// of the steps; for more specific functionality, create +// a new [Generator] with [NewGenerator] and call methods on it. 
+func GeneratePkgs(cfg *Config, pkgs []*packages.Package) error { + g := NewGenerator(cfg, pkgs) + for _, pkg := range g.Pkgs { + g.Pkg = pkg + g.Buf.Reset() + err := g.Find() + if err != nil { + return fmt.Errorf("typegen: Generate: error finding types for package %q: %w", pkg.Name, err) + } + g.PrintHeader() + has, err := g.Generate() + if !has { + continue + } + if err != nil { + return fmt.Errorf("typegen: Generate: error generating code for package %q: %w", pkg.Name, err) + } + err = g.Write() + if err != nil { + return fmt.Errorf("typegen: Generate: error writing code for package %q: %w", pkg.Name, err) + } + } + return nil +} diff --git a/types/typegen/typegen_test.go b/types/typegen/generate_test.go similarity index 100% rename from types/typegen/typegen_test.go rename to types/typegen/generate_test.go diff --git a/types/typegen/generator.go b/types/typegen/generator.go index 4c7e371adf..733c95809b 100644 --- a/types/typegen/generator.go +++ b/types/typegen/generator.go @@ -80,7 +80,7 @@ func (g *Generator) Find() error { return err } g.Types = []*Type{} - err = generate.Inspect(g.Pkg, g.Inspect) + err = generate.Inspect(g.Pkg, g.Inspect, "enumgen.go", "typegen.go") if err != nil { return fmt.Errorf("error while inspecting: %w", err) } diff --git a/types/typegen/testdata/typegen.go b/types/typegen/testdata/typegen.go index 3149353580..ecaada5d1d 100644 --- a/types/typegen/testdata/typegen.go +++ b/types/typegen/testdata/typegen.go @@ -1,4 +1,4 @@ -// Code generated by "typegen.test -test.testlogfile=/var/folders/x1/r8shprmj7j71zbw3qvgl9dqc0000gq/T/go-build1829688390/b982/testlog.txt -test.paniconexit0 -test.timeout=20s"; DO NOT EDIT. +// Code generated by "typegen.test -test.paniconexit0 -test.timeout=10m0s"; DO NOT EDIT. package testdata diff --git a/types/typegen/typegen.go b/types/typegen/typegen.go index 2a7eac6084..13d67e7030 100644 --- a/types/typegen/typegen.go +++ b/types/typegen/typegen.go @@ -1,81 +1,11 @@ -// Copyright (c) 2023, Cogent Core. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by "core generate"; DO NOT EDIT. -// Package typgen provides the generation of type information for -// Go types, methods, and functions. package typegen -//go:generate go run ../cmd/typegen -output typegen_gen.go - import ( - "fmt" - - "cogentcore.org/core/base/generate" - "golang.org/x/tools/go/packages" + "cogentcore.org/core/types" ) -// ParsePackages parses the package(s) located in the configuration source directory. -func ParsePackages(cfg *Config) ([]*packages.Package, error) { - pcfg := &packages.Config{ - Mode: PackageModes(cfg), - // TODO: Need to think about types and functions in test files. Maybe write typegen_test.go - // in a separate pass? For later. - Tests: false, - } - pkgs, err := generate.Load(pcfg, cfg.Dir) - if err != nil { - return nil, fmt.Errorf("typegen: Generate: error parsing package: %w", err) - } - return pkgs, err -} - -// Generate generates typegen type info, using the -// configuration information, loading the packages from the -// configuration source directory, and writing the result -// to the configuration output file. -// -// It is a simple entry point to typegen that does all -// of the steps; for more specific functionality, create -// a new [Generator] with [NewGenerator] and call methods on it. 
-// -//cli:cmd -root -func Generate(cfg *Config) error { //types:add - pkgs, err := ParsePackages(cfg) - if err != nil { - return err - } - return GeneratePkgs(cfg, pkgs) -} +var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/types/typegen.Config", IDName: "config", Doc: "Config contains the configuration information\nused by typegen", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "Dir", Doc: "the source directory to run typegen on (can be set to multiple through paths like ./...)"}, {Name: "Output", Doc: "the output file location relative to the package on which typegen is being called"}, {Name: "AddTypes", Doc: "whether to add types to typegen by default"}, {Name: "AddMethods", Doc: "whether to add methods to typegen by default"}, {Name: "AddFuncs", Doc: "whether to add functions to typegen by default"}, {Name: "InterfaceConfigs", Doc: "An ordered map of configs keyed by fully qualified interface type names; if a type implements the interface, the config will be applied to it.\nThe configs are applied in sequential ascending order, which means that\nthe last config overrides the other ones, so the most specific\ninterfaces should typically be put last.\nNote: the package typegen is run on must explicitly reference this interface at some point for this to work; adding a simple\n`var _ MyInterface = (*MyType)(nil)` statement to check for interface implementation is an easy way to accomplish that.\nNote: typegen will still succeed if it can not find one of the interfaces specified here in order to allow it to work generically across multiple directories; you can use the -v flag to get log warnings about this if you suspect that it is not finding interfaces when it should."}, {Name: "Setters", Doc: "Whether to generate chaining `Set*` methods for each exported field of each type (eg: \"SetText\" for field \"Text\").\nIf this is set to true, then you can add `set:\"-\"` struct tags to individual fields\nto prevent Set methods being generated for them."}, {Name: "Templates", Doc: "a slice of templates to execute on each type being added; the template data is of the type typegen.Type"}}}) -// GeneratePkgs generates enum methods using -// the given configuration object and packages parsed -// from the configuration source directory, -// and writes the result to the config output file. -// It is a simple entry point to typegen that does all -// of the steps; for more specific functionality, create -// a new [Generator] with [NewGenerator] and call methods on it. 
-func GeneratePkgs(cfg *Config, pkgs []*packages.Package) error { - g := NewGenerator(cfg, pkgs) - for _, pkg := range g.Pkgs { - g.Pkg = pkg - g.Buf.Reset() - err := g.Find() - if err != nil { - return fmt.Errorf("typegen: Generate: error finding types for package %q: %w", pkg.Name, err) - } - g.PrintHeader() - has, err := g.Generate() - if !has { - continue - } - if err != nil { - return fmt.Errorf("typegen: Generate: error generating code for package %q: %w", pkg.Name, err) - } - err = g.Write() - if err != nil { - return fmt.Errorf("typegen: Generate: error writing code for package %q: %w", pkg.Name, err) - } - } - return nil -} +var _ = types.AddFunc(&types.Func{Name: "cogentcore.org/core/types/typegen.Generate", Doc: "Generate generates typegen type info, using the\nconfiguration information, loading the packages from the\nconfiguration source directory, and writing the result\nto the configuration output file.\n\nIt is a simple entry point to typegen that does all\nof the steps; for more specific functionality, create\na new [Generator] with [NewGenerator] and call methods on it.", Directives: []types.Directive{{Tool: "cli", Directive: "cmd", Args: []string{"-root"}}, {Tool: "types", Directive: "add"}}, Args: []string{"cfg"}, Returns: []string{"error"}}) diff --git a/types/typegen/typegen_gen.go b/types/typegen/typegen_gen.go deleted file mode 100644 index 1e087d2d29..0000000000 --- a/types/typegen/typegen_gen.go +++ /dev/null @@ -1,11 +0,0 @@ -// Code generated by "typegen -output typegen_gen.go"; DO NOT EDIT. - -package typegen - -import ( - "cogentcore.org/core/types" -) - -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/types/typegen.Config", IDName: "config", Doc: "Config contains the configuration information\nused by typegen", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "Dir", Doc: "the source directory to run typegen on (can be set to multiple through paths like ./...)"}, {Name: "Output", Doc: "the output file location relative to the package on which typegen is being called"}, {Name: "AddTypes", Doc: "whether to add types to typegen by default"}, {Name: "AddMethods", Doc: "whether to add methods to typegen by default"}, {Name: "AddFuncs", Doc: "whether to add functions to typegen by default"}, {Name: "InterfaceConfigs", Doc: "An ordered map of configs keyed by fully qualified interface type names; if a type implements the interface, the config will be applied to it.\nThe configs are applied in sequential ascending order, which means that\nthe last config overrides the other ones, so the most specific\ninterfaces should typically be put last.\nNote: the package typegen is run on must explicitly reference this interface at some point for this to work; adding a simple\n`var _ MyInterface = (*MyType)(nil)` statement to check for interface implementation is an easy way to accomplish that.\nNote: typegen will still succeed if it can not find one of the interfaces specified here in order to allow it to work generically across multiple directories; you can use the -v flag to get log warnings about this if you suspect that it is not finding interfaces when it should."}, {Name: "Setters", Doc: "Whether to generate chaining `Set*` methods for each exported field of each type (eg: \"SetText\" for field \"Text\").\nIf this is set to true, then you can add `set:\"-\"` struct tags to individual fields\nto prevent Set methods being generated for them."}, {Name: "Templates", Doc: "a slice of templates to execute on each type 
being added; the template data is of the type typegen.Type"}}}) - -var _ = types.AddFunc(&types.Func{Name: "cogentcore.org/core/types/typegen.Generate", Doc: "Generate generates typegen type info, using the\nconfiguration information, loading the packages from the\nconfiguration source directory, and writing the result\nto the configuration output file.\n\nIt is a simple entry point to typegen that does all\nof the steps; for more specific functionality, create\na new [Generator] with [NewGenerator] and call methods on it.", Directives: []types.Directive{{Tool: "cli", Directive: "cmd", Args: []string{"-root"}}, {Tool: "types", Directive: "add"}}, Args: []string{"cfg"}, Returns: []string{"error"}}) diff --git a/xyz/typegen.go b/xyz/typegen.go index 801ba84539..66e8ac870c 100644 --- a/xyz/typegen.go +++ b/xyz/typegen.go @@ -62,8 +62,6 @@ var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/xyz.LightColors", I var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/xyz.Lines", IDName: "lines", Doc: "Lines are lines rendered as long thin boxes defined by points\nand width parameters. The Mesh must be drawn in the XY plane (i.e., use Z = 0\nor a constant unless specifically relevant to have full 3D variation).\nRotate the solid to put into other planes.", Embeds: []types.Field{{Name: "MeshBase"}}, Fields: []types.Field{{Name: "Points", Doc: "line points (must be 2 or more)"}, {Name: "Width", Doc: "line width, Y = height perpendicular to line direction, and X = depth"}, {Name: "Colors", Doc: "optional colors for each point -- actual color interpolates between"}, {Name: "Closed", Doc: "if true, connect the first and last points to form a closed shape"}}}) -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/xyz.Line", IDName: "line", Doc: "Line is a Solid that is used for line elements.\nType is need to trigger more precise event handling.", Directives: []types.Directive{{Tool: "core", Directive: "no-new"}}, Embeds: []types.Field{{Name: "Solid"}}}) - var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/xyz.Material", IDName: "material", Doc: "Material describes the material properties of a surface (colors, shininess, texture)\ni.e., phong lighting parameters.\nMain color is used for both ambient and diffuse color, and alpha component\nis used for opacity. The Emissive color is only for glowing objects.\nThe Specular color is always white (multiplied by light color).\nTextures are stored on the Scene and accessed by name", Directives: []types.Directive{{Tool: "types", Directive: "add", Args: []string{"-setters"}}}, Fields: []types.Field{{Name: "Color", Doc: "Color is the main color of surface, used for both ambient and diffuse color in standard Phong model -- alpha component determines transparency -- note that transparent objects require more complex rendering"}, {Name: "Emissive", Doc: "Emissive is the color that surface emits independent of any lighting -- i.e., glow -- can be used for marking lights with an object"}, {Name: "Shiny", Doc: "Shiny is the specular shininess factor -- how focally vs. broad the surface shines back directional light -- this is an exponential factor, with 0 = very broad diffuse reflection, and higher values (typically max of 128 or so but can go higher) having a smaller more focal specular reflection. Also set Reflective factor to change overall shininess effect."}, {Name: "Reflective", Doc: "Reflective is the specular reflectiveness factor -- how much it shines back directional light. 
The specular reflection color is always white * the incoming light."}, {Name: "Bright", Doc: "Bright is an overall multiplier on final computed color value -- can be used to tune the overall brightness of various surfaces relative to each other for a given set of lighting parameters"}, {Name: "TextureName", Doc: "TextureName is the name of the texture to provide color for the surface."}, {Name: "Tiling", Doc: "Tiling is the texture tiling parameters: repeat and offset."}, {Name: "CullBack", Doc: "CullBack indicates to cull the back-facing surfaces."}, {Name: "CullFront", Doc: "CullFront indicates to cull the front-facing surfaces."}, {Name: "Texture", Doc: "Texture is the cached [Texture] object set based on [Material.TextureName]."}}}) // SetColor sets the [Material.Color]: @@ -124,7 +122,7 @@ var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/xyz.GenMesh", IDNam var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/xyz.Node", IDName: "node", Doc: "Node is the common interface for all xyz 3D tree nodes.\n[Solid] and [Group] are the two main types of nodes,\nwhich both extend [NodeBase] for the core functionality.", Methods: []types.Method{{Name: "AsNodeBase", Doc: "AsNodeBase returns the [NodeBase] for our node, which gives\naccess to all the base-level data structures and methods\nwithout requiring interface methods.", Returns: []string{"NodeBase"}}, {Name: "IsSolid", Doc: "IsSolid returns true if this is an [Solid] node (otherwise a [Group]).", Returns: []string{"bool"}}, {Name: "AsSolid", Doc: "AsSolid returns the node as a [Solid] (nil if not).", Returns: []string{"Solid"}}, {Name: "Validate", Doc: "Validate checks that scene element is valid.", Returns: []string{"error"}}, {Name: "UpdateWorldMatrix", Doc: "UpdateWorldMatrix updates this node's local and world matrix based on parent's world matrix.", Args: []string{"parWorld"}}, {Name: "UpdateMeshBBox", Doc: "UpdateMeshBBox updates the Mesh-based BBox info for all nodes.\ngroups aggregate over elements. It is called from WalkPost traversal."}, {Name: "IsVisible", Doc: "IsVisible provides the definitive answer as to whether a given node\nis currently visible. It is only entirely valid after a render pass\nfor widgets in a visible window, but it checks the window and viewport\nfor their visibility status as well, which is available always.\nNon-visible nodes are automatically not rendered and not connected to\nwindow events. The Invisible flag is one key element of the IsVisible\ncalculus; it is set by e.g., TabView for invisible tabs, and is also\nset if a widget is entirely out of render range. 
But again, use\nIsVisible as the main end-user method.\nFor robustness, it recursively calls the parent; this is typically\na short path; propagating the Invisible flag properly can be\nvery challenging without mistakenly overwriting invisibility at various\nlevels.", Returns: []string{"bool"}}, {Name: "IsTransparent", Doc: "IsTransparent returns true if solid has transparent color.", Returns: []string{"bool"}}, {Name: "Config", Doc: "Config configures the node."}, {Name: "RenderClass", Doc: "RenderClass returns the class of rendering for this solid.\nIt is used for organizing the ordering of rendering.", Returns: []string{"RenderClasses"}}, {Name: "PreRender", Doc: "PreRender is called by Scene Render to upload\nall the object data to the Phong renderer."}, {Name: "Render", Doc: "Render is called by Scene Render to actually render.", Args: []string{"rp"}}}}) -var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/xyz.NodeBase", IDName: "node-base", Doc: "NodeBase is the basic 3D tree node, which has the full transform information\nrelative to parent, and computed bounding boxes, etc.\nIt implements the [Node] interface and contains the core functionality\ncommon to all 3D nodes.", Embeds: []types.Field{{Name: "NodeBase"}}, Fields: []types.Field{{Name: "Invisible", Doc: "Invisible is whether this node is invisible."}, {Name: "Pose", Doc: "Pose is the complete specification of position and orientation."}, {Name: "Scene", Doc: "Scene is the cached [Scene]."}, {Name: "MeshBBox", Doc: "mesh-based local bounding box (aggregated for groups)"}, {Name: "WorldBBox", Doc: "world coordinates bounding box"}, {Name: "NDCBBox", Doc: "normalized display coordinates bounding box, used for frustrum clipping"}, {Name: "BBox", Doc: "raw original bounding box for the widget within its parent Scene.\nThis is prior to intersecting with Frame bounds."}, {Name: "SceneBBox", Doc: "2D bounding box for region occupied within Scene Frame that we render onto.\nThis is BBox intersected with Frame bounds."}}}) +var _ = types.AddType(&types.Type{Name: "cogentcore.org/core/xyz.NodeBase", IDName: "node-base", Doc: "NodeBase is the basic 3D tree node, which has the full transform information\nrelative to parent, and computed bounding boxes, etc.\nIt implements the [Node] interface and contains the core functionality\ncommon to all 3D nodes.", Embeds: []types.Field{{Name: "NodeBase"}}, Fields: []types.Field{{Name: "Invisible", Doc: "Invisible is whether this node is invisible."}, {Name: "Pose", Doc: "Pose is the complete specification of position and orientation."}, {Name: "Scene", Doc: "Scene is the cached [Scene]."}, {Name: "MeshBBox", Doc: "mesh-based local bounding box (aggregated for groups)"}, {Name: "WorldBBox", Doc: "world coordinates bounding box"}, {Name: "NDCBBox", Doc: "normalized display coordinates bounding box, used for frustrum clipping"}, {Name: "BBox", Doc: "raw original bounding box for the widget within its parent Scene.\nThis is prior to intersecting with Frame bounds."}, {Name: "SceneBBox", Doc: "2D bounding box for region occupied within Scene Frame that we render onto.\nThis is BBox intersected with Frame bounds."}, {Name: "isLinear", Doc: "isLinear indicates that this element contains a line-like shape,\nwhich engages a more selective event processing logic to determine\nif the node was selected based on a mouse click point."}}}) // NewNodeBase returns a new [NodeBase] with the given optional parent: // NodeBase is the basic 3D tree node, which has the full transform information diff --git 
a/yaegicore/symbols/cogentcore_org-core-base-errors.go b/yaegicore/basesymbols/cogentcore_org-core-base-errors.go similarity index 98% rename from yaegicore/symbols/cogentcore_org-core-base-errors.go rename to yaegicore/basesymbols/cogentcore_org-core-base-errors.go index 6e34726b01..75ee62c9b2 100644 --- a/yaegicore/symbols/cogentcore_org-core-base-errors.go +++ b/yaegicore/basesymbols/cogentcore_org-core-base-errors.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/base/errors'. DO NOT EDIT. -package symbols +package basesymbols import ( "cogentcore.org/core/base/errors" diff --git a/yaegicore/symbols/cogentcore_org-core-base-fileinfo.go b/yaegicore/basesymbols/cogentcore_org-core-base-fileinfo.go similarity index 99% rename from yaegicore/symbols/cogentcore_org-core-base-fileinfo.go rename to yaegicore/basesymbols/cogentcore_org-core-base-fileinfo.go index 0f3a5a442f..9a6688e28d 100644 --- a/yaegicore/symbols/cogentcore_org-core-base-fileinfo.go +++ b/yaegicore/basesymbols/cogentcore_org-core-base-fileinfo.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/base/fileinfo'. DO NOT EDIT. -package symbols +package basesymbols import ( "cogentcore.org/core/base/fileinfo" @@ -89,6 +89,7 @@ func init() { "Icons": reflect.ValueOf(&fileinfo.Icons).Elem(), "Image": reflect.ValueOf(fileinfo.Image), "Ini": reflect.ValueOf(fileinfo.Ini), + "IsGeneratedFile": reflect.ValueOf(fileinfo.IsGeneratedFile), "IsMatch": reflect.ValueOf(fileinfo.IsMatch), "IsMatchList": reflect.ValueOf(fileinfo.IsMatchList), "Java": reflect.ValueOf(fileinfo.Java), diff --git a/yaegicore/symbols/cogentcore_org-core-base-fsx.go b/yaegicore/basesymbols/cogentcore_org-core-base-fsx.go similarity index 91% rename from yaegicore/symbols/cogentcore_org-core-base-fsx.go rename to yaegicore/basesymbols/cogentcore_org-core-base-fsx.go index 4083d45fd0..4e43da0e76 100644 --- a/yaegicore/symbols/cogentcore_org-core-base-fsx.go +++ b/yaegicore/basesymbols/cogentcore_org-core-base-fsx.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/base/fsx'. DO NOT EDIT. -package symbols +package basesymbols import ( "cogentcore.org/core/base/fsx" @@ -25,5 +25,8 @@ func init() { "RelativeFilePath": reflect.ValueOf(fsx.RelativeFilePath), "SplitRootPathFS": reflect.ValueOf(fsx.SplitRootPathFS), "Sub": reflect.ValueOf(fsx.Sub), + + // type definitions + "Filename": reflect.ValueOf((*fsx.Filename)(nil)), } } diff --git a/yaegicore/symbols/cogentcore_org-core-base-labels.go b/yaegicore/basesymbols/cogentcore_org-core-base-labels.go similarity index 98% rename from yaegicore/symbols/cogentcore_org-core-base-labels.go rename to yaegicore/basesymbols/cogentcore_org-core-base-labels.go index 8e95a74de5..e76dcd5802 100644 --- a/yaegicore/symbols/cogentcore_org-core-base-labels.go +++ b/yaegicore/basesymbols/cogentcore_org-core-base-labels.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/base/labels'. DO NOT EDIT. -package symbols +package basesymbols import ( "cogentcore.org/core/base/labels" diff --git a/yaegicore/basesymbols/cogentcore_org-core-base-num.go b/yaegicore/basesymbols/cogentcore_org-core-base-num.go new file mode 100644 index 0000000000..186c94aec9 --- /dev/null +++ b/yaegicore/basesymbols/cogentcore_org-core-base-num.go @@ -0,0 +1,11 @@ +// Code generated by 'yaegi extract cogentcore.org/core/base/num'. DO NOT EDIT. 
+ +package basesymbols + +import ( + "reflect" +) + +func init() { + Symbols["cogentcore.org/core/base/num/num"] = map[string]reflect.Value{} +} diff --git a/yaegicore/symbols/cogentcore_org-core-base-reflectx.go b/yaegicore/basesymbols/cogentcore_org-core-base-reflectx.go similarity index 93% rename from yaegicore/symbols/cogentcore_org-core-base-reflectx.go rename to yaegicore/basesymbols/cogentcore_org-core-base-reflectx.go index c341675c80..a6232911ca 100644 --- a/yaegicore/symbols/cogentcore_org-core-base-reflectx.go +++ b/yaegicore/basesymbols/cogentcore_org-core-base-reflectx.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/base/reflectx'. DO NOT EDIT. -package symbols +package basesymbols import ( "cogentcore.org/core/base/reflectx" @@ -12,11 +12,15 @@ func init() { Symbols["cogentcore.org/core/base/reflectx/reflectx"] = map[string]reflect.Value{ // function, constant and variable definitions "CloneToType": reflect.ValueOf(reflectx.CloneToType), + "CopyFields": reflect.ValueOf(reflectx.CopyFields), "CopyMapRobust": reflect.ValueOf(reflectx.CopyMapRobust), "CopySliceRobust": reflect.ValueOf(reflectx.CopySliceRobust), + "FieldByPath": reflect.ValueOf(reflectx.FieldByPath), "FormatDefault": reflect.ValueOf(reflectx.FormatDefault), "IsNil": reflect.ValueOf(reflectx.IsNil), "KindIsBasic": reflect.ValueOf(reflectx.KindIsBasic), + "KindIsFloat": reflect.ValueOf(reflectx.KindIsFloat), + "KindIsInt": reflect.ValueOf(reflectx.KindIsInt), "KindIsNumber": reflect.ValueOf(reflectx.KindIsNumber), "LongTypeName": reflect.ValueOf(reflectx.LongTypeName), "MapAdd": reflect.ValueOf(reflectx.MapAdd), @@ -33,6 +37,7 @@ func init() { "NumAllFields": reflect.ValueOf(reflectx.NumAllFields), "OnePointerValue": reflect.ValueOf(reflectx.OnePointerValue), "PointerValue": reflect.ValueOf(reflectx.PointerValue), + "SetFieldsFromMap": reflect.ValueOf(reflectx.SetFieldsFromMap), "SetFromDefaultTag": reflect.ValueOf(reflectx.SetFromDefaultTag), "SetFromDefaultTags": reflect.ValueOf(reflectx.SetFromDefaultTags), "SetMapRobust": reflect.ValueOf(reflectx.SetMapRobust), diff --git a/yaegicore/symbols/cogentcore_org-core-math32.go b/yaegicore/basesymbols/cogentcore_org-core-math32.go similarity index 99% rename from yaegicore/symbols/cogentcore_org-core-math32.go rename to yaegicore/basesymbols/cogentcore_org-core-math32.go index df1c0a89e8..2ae2eb9c3a 100644 --- a/yaegicore/symbols/cogentcore_org-core-math32.go +++ b/yaegicore/basesymbols/cogentcore_org-core-math32.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/math32'. DO NOT EDIT. 
-package symbols +package basesymbols import ( "cogentcore.org/core/math32" @@ -29,8 +29,6 @@ func init() { "BarycoordFromPoint": reflect.ValueOf(math32.BarycoordFromPoint), "Cbrt": reflect.ValueOf(math32.Cbrt), "Ceil": reflect.ValueOf(math32.Ceil), - "Clamp": reflect.ValueOf(math32.Clamp), - "ClampInt": reflect.ValueOf(math32.ClampInt), "ContainsPoint": reflect.ValueOf(math32.ContainsPoint), "CopyFloat32s": reflect.ValueOf(math32.CopyFloat32s), "CopyFloat64s": reflect.ValueOf(math32.CopyFloat64s), diff --git a/yaegicore/symbols/fmt.go b/yaegicore/basesymbols/fmt.go similarity index 99% rename from yaegicore/symbols/fmt.go rename to yaegicore/basesymbols/fmt.go index 4a2e90c6a0..770b779202 100644 --- a/yaegicore/symbols/fmt.go +++ b/yaegicore/basesymbols/fmt.go @@ -3,7 +3,7 @@ //go:build go1.22 // +build go1.22 -package symbols +package basesymbols import ( "fmt" diff --git a/yaegicore/symbols/log-slog.go b/yaegicore/basesymbols/log-slog.go similarity index 99% rename from yaegicore/symbols/log-slog.go rename to yaegicore/basesymbols/log-slog.go index 267e584759..b4058c6dd5 100644 --- a/yaegicore/symbols/log-slog.go +++ b/yaegicore/basesymbols/log-slog.go @@ -3,7 +3,7 @@ //go:build go1.22 // +build go1.22 -package symbols +package basesymbols import ( "context" diff --git a/yaegicore/basesymbols/make b/yaegicore/basesymbols/make new file mode 100755 index 0000000000..3ef235e931 --- /dev/null +++ b/yaegicore/basesymbols/make @@ -0,0 +1,12 @@ +#!/usr/bin/env goal + +command extract { + for _, pkg := range args { + yaegi extract {"cogentcore.org/core/"+pkg} + } +} + +yaegi extract fmt strconv strings math time log/slog reflect + +extract math32 base/errors base/fsx base/reflectx base/labels base/fileinfo base/num + diff --git a/yaegicore/basesymbols/math.go b/yaegicore/basesymbols/math.go new file mode 100644 index 0000000000..481253d25e --- /dev/null +++ b/yaegicore/basesymbols/math.go @@ -0,0 +1,116 @@ +// Code generated by 'yaegi extract math'. DO NOT EDIT. 
+ +//go:build go1.22 +// +build go1.22 + +package basesymbols + +import ( + "go/constant" + "go/token" + "math" + "reflect" +) + +func init() { + Symbols["math/math"] = map[string]reflect.Value{ + // function, constant and variable definitions + "Abs": reflect.ValueOf(math.Abs), + "Acos": reflect.ValueOf(math.Acos), + "Acosh": reflect.ValueOf(math.Acosh), + "Asin": reflect.ValueOf(math.Asin), + "Asinh": reflect.ValueOf(math.Asinh), + "Atan": reflect.ValueOf(math.Atan), + "Atan2": reflect.ValueOf(math.Atan2), + "Atanh": reflect.ValueOf(math.Atanh), + "Cbrt": reflect.ValueOf(math.Cbrt), + "Ceil": reflect.ValueOf(math.Ceil), + "Copysign": reflect.ValueOf(math.Copysign), + "Cos": reflect.ValueOf(math.Cos), + "Cosh": reflect.ValueOf(math.Cosh), + "Dim": reflect.ValueOf(math.Dim), + "E": reflect.ValueOf(constant.MakeFromLiteral("2.71828182845904523536028747135266249775724709369995957496696762566337824315673231520670375558666729784504486779277967997696994772644702281675346915668215131895555530285035761295375777990557253360748291015625", token.FLOAT, 0)), + "Erf": reflect.ValueOf(math.Erf), + "Erfc": reflect.ValueOf(math.Erfc), + "Erfcinv": reflect.ValueOf(math.Erfcinv), + "Erfinv": reflect.ValueOf(math.Erfinv), + "Exp": reflect.ValueOf(math.Exp), + "Exp2": reflect.ValueOf(math.Exp2), + "Expm1": reflect.ValueOf(math.Expm1), + "FMA": reflect.ValueOf(math.FMA), + "Float32bits": reflect.ValueOf(math.Float32bits), + "Float32frombits": reflect.ValueOf(math.Float32frombits), + "Float64bits": reflect.ValueOf(math.Float64bits), + "Float64frombits": reflect.ValueOf(math.Float64frombits), + "Floor": reflect.ValueOf(math.Floor), + "Frexp": reflect.ValueOf(math.Frexp), + "Gamma": reflect.ValueOf(math.Gamma), + "Hypot": reflect.ValueOf(math.Hypot), + "Ilogb": reflect.ValueOf(math.Ilogb), + "Inf": reflect.ValueOf(math.Inf), + "IsInf": reflect.ValueOf(math.IsInf), + "IsNaN": reflect.ValueOf(math.IsNaN), + "J0": reflect.ValueOf(math.J0), + "J1": reflect.ValueOf(math.J1), + "Jn": reflect.ValueOf(math.Jn), + "Ldexp": reflect.ValueOf(math.Ldexp), + "Lgamma": reflect.ValueOf(math.Lgamma), + "Ln10": reflect.ValueOf(constant.MakeFromLiteral("2.30258509299404568401799145468436420760110148862877297603332784146804725494827975466552490443295866962642372461496758838959542646932914211937012833592062802600362869664962772731087170541286468505859375", token.FLOAT, 0)), + "Ln2": reflect.ValueOf(constant.MakeFromLiteral("0.6931471805599453094172321214581765680755001343602552541206800092715999496201383079363438206637927920954189307729314303884387720696314608777673678644642390655170150035209453154294578780536539852619171142578125", token.FLOAT, 0)), + "Log": reflect.ValueOf(math.Log), + "Log10": reflect.ValueOf(math.Log10), + "Log10E": reflect.ValueOf(constant.MakeFromLiteral("0.43429448190325182765112891891660508229439700580366656611445378416636798190620320263064286300825210972160277489744884502676719847561509639618196799746596688688378591625127711495224502868950366973876953125", token.FLOAT, 0)), + "Log1p": reflect.ValueOf(math.Log1p), + "Log2": reflect.ValueOf(math.Log2), + "Log2E": reflect.ValueOf(constant.MakeFromLiteral("1.44269504088896340735992468100189213742664595415298593413544940772066427768997545329060870636212628972710992130324953463427359402479619301286929040235571747101382214539290471666532766903401352465152740478515625", token.FLOAT, 0)), + "Logb": reflect.ValueOf(math.Logb), + "Max": reflect.ValueOf(math.Max), + "MaxFloat32": reflect.ValueOf(constant.MakeFromLiteral("340282346638528859811704183484516925440", 
token.FLOAT, 0)), + "MaxFloat64": reflect.ValueOf(constant.MakeFromLiteral("179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368", token.FLOAT, 0)), + "MaxInt": reflect.ValueOf(constant.MakeFromLiteral("9223372036854775807", token.INT, 0)), + "MaxInt16": reflect.ValueOf(constant.MakeFromLiteral("32767", token.INT, 0)), + "MaxInt32": reflect.ValueOf(constant.MakeFromLiteral("2147483647", token.INT, 0)), + "MaxInt64": reflect.ValueOf(constant.MakeFromLiteral("9223372036854775807", token.INT, 0)), + "MaxInt8": reflect.ValueOf(constant.MakeFromLiteral("127", token.INT, 0)), + "MaxUint": reflect.ValueOf(constant.MakeFromLiteral("18446744073709551615", token.INT, 0)), + "MaxUint16": reflect.ValueOf(constant.MakeFromLiteral("65535", token.INT, 0)), + "MaxUint32": reflect.ValueOf(constant.MakeFromLiteral("4294967295", token.INT, 0)), + "MaxUint64": reflect.ValueOf(constant.MakeFromLiteral("18446744073709551615", token.INT, 0)), + "MaxUint8": reflect.ValueOf(constant.MakeFromLiteral("255", token.INT, 0)), + "Min": reflect.ValueOf(math.Min), + "MinInt": reflect.ValueOf(constant.MakeFromLiteral("-9223372036854775808", token.INT, 0)), + "MinInt16": reflect.ValueOf(constant.MakeFromLiteral("-32768", token.INT, 0)), + "MinInt32": reflect.ValueOf(constant.MakeFromLiteral("-2147483648", token.INT, 0)), + "MinInt64": reflect.ValueOf(constant.MakeFromLiteral("-9223372036854775808", token.INT, 0)), + "MinInt8": reflect.ValueOf(constant.MakeFromLiteral("-128", token.INT, 0)), + "Mod": reflect.ValueOf(math.Mod), + "Modf": reflect.ValueOf(math.Modf), + "NaN": reflect.ValueOf(math.NaN), + "Nextafter": reflect.ValueOf(math.Nextafter), + "Nextafter32": reflect.ValueOf(math.Nextafter32), + "Phi": reflect.ValueOf(constant.MakeFromLiteral("1.6180339887498948482045868343656381177203091798057628621354486119746080982153796619881086049305501566952211682590824739205931370737029882996587050475921915678674035433959321750307935872115194797515869140625", token.FLOAT, 0)), + "Pi": reflect.ValueOf(constant.MakeFromLiteral("3.141592653589793238462643383279502884197169399375105820974944594789982923695635954704435713335896673485663389728754819466702315787113662862838515639906529162340867271374644786874341662041842937469482421875", token.FLOAT, 0)), + "Pow": reflect.ValueOf(math.Pow), + "Pow10": reflect.ValueOf(math.Pow10), + "Remainder": reflect.ValueOf(math.Remainder), + "Round": reflect.ValueOf(math.Round), + "RoundToEven": reflect.ValueOf(math.RoundToEven), + "Signbit": reflect.ValueOf(math.Signbit), + "Sin": reflect.ValueOf(math.Sin), + "Sincos": reflect.ValueOf(math.Sincos), + "Sinh": reflect.ValueOf(math.Sinh), + "SmallestNonzeroFloat32": reflect.ValueOf(constant.MakeFromLiteral("1.40129846432481707092372958328991613128026194187651577175706828388979108268586060148663818836212158203125e-45", token.FLOAT, 0)), + "SmallestNonzeroFloat64": 
reflect.ValueOf(constant.MakeFromLiteral("4.940656458412465441765687928682213723650598026143247644255856825006755072702087518652998363616359923797965646954457177309266567103559397963987747960107818781263007131903114045278458171678489821036887186360569987307230500063874091535649843873124733972731696151400317153853980741262385655911710266585566867681870395603106249319452715914924553293054565444011274801297099995419319894090804165633245247571478690147267801593552386115501348035264934720193790268107107491703332226844753335720832431936092382893458368060106011506169809753078342277318329247904982524730776375927247874656084778203734469699533647017972677717585125660551199131504891101451037862738167250955837389733598993664809941164205702637090279242767544565229087538682506419718265533447265625e-324", token.FLOAT, 0)), + "Sqrt": reflect.ValueOf(math.Sqrt), + "Sqrt2": reflect.ValueOf(constant.MakeFromLiteral("1.414213562373095048801688724209698078569671875376948073176679739576083351575381440094441524123797447886801949755143139115339040409162552642832693297721230919563348109313505318596071447245776653289794921875", token.FLOAT, 0)), + "SqrtE": reflect.ValueOf(constant.MakeFromLiteral("1.64872127070012814684865078781416357165377610071014801157507931167328763229187870850146925823776361770041160388013884200789716007979526823569827080974091691342077871211546646890155898290686309337615966796875", token.FLOAT, 0)), + "SqrtPhi": reflect.ValueOf(constant.MakeFromLiteral("1.2720196495140689642524224617374914917156080418400962486166403754616080542166459302584536396369727769747312116100875915825863540562126478288118732191412003988041797518382391984914647764526307582855224609375", token.FLOAT, 0)), + "SqrtPi": reflect.ValueOf(constant.MakeFromLiteral("1.772453850905516027298167483341145182797549456122387128213807789740599698370237052541269446184448945647349951047154197675245574635259260134350885938555625028620527962319730619356050738133490085601806640625", token.FLOAT, 0)), + "Tan": reflect.ValueOf(math.Tan), + "Tanh": reflect.ValueOf(math.Tanh), + "Trunc": reflect.ValueOf(math.Trunc), + "Y0": reflect.ValueOf(math.Y0), + "Y1": reflect.ValueOf(math.Y1), + "Yn": reflect.ValueOf(math.Yn), + } +} diff --git a/yaegicore/symbols/reflect.go b/yaegicore/basesymbols/reflect.go similarity index 99% rename from yaegicore/symbols/reflect.go rename to yaegicore/basesymbols/reflect.go index d88982d619..0334d25e3c 100644 --- a/yaegicore/symbols/reflect.go +++ b/yaegicore/basesymbols/reflect.go @@ -3,7 +3,7 @@ //go:build go1.22 // +build go1.22 -package symbols +package basesymbols import ( "reflect" diff --git a/yaegicore/symbols/strconv.go b/yaegicore/basesymbols/strconv.go similarity index 99% rename from yaegicore/symbols/strconv.go rename to yaegicore/basesymbols/strconv.go index 99a1e3a547..1006c2efdc 100644 --- a/yaegicore/symbols/strconv.go +++ b/yaegicore/basesymbols/strconv.go @@ -3,7 +3,7 @@ //go:build go1.22 // +build go1.22 -package symbols +package basesymbols import ( "go/constant" diff --git a/yaegicore/symbols/strings.go b/yaegicore/basesymbols/strings.go similarity index 99% rename from yaegicore/symbols/strings.go rename to yaegicore/basesymbols/strings.go index 6d0eba0d58..74ef5edb7f 100644 --- a/yaegicore/symbols/strings.go +++ b/yaegicore/basesymbols/strings.go @@ -3,7 +3,7 @@ //go:build go1.22 // +build go1.22 -package symbols +package basesymbols import ( "reflect" diff --git a/yaegicore/basesymbols/symbols.go b/yaegicore/basesymbols/symbols.go new file mode 100644 index 0000000000..ca9680f1cd --- 
/dev/null +++ b/yaegicore/basesymbols/symbols.go @@ -0,0 +1,12 @@ +// Copyright (c) 2024, Cogent Core. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package basesymbols contains yaegi symbols for base (non-GUI) packages. +package basesymbols + +//go:generate ./make + +import "reflect" + +var Symbols = map[string]map[string]reflect.Value{} diff --git a/yaegicore/symbols/symbols_test.go b/yaegicore/basesymbols/symbols_test.go similarity index 91% rename from yaegicore/symbols/symbols_test.go rename to yaegicore/basesymbols/symbols_test.go index b91e1f3268..85b6175e67 100644 --- a/yaegicore/symbols/symbols_test.go +++ b/yaegicore/basesymbols/symbols_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package symbols +package basesymbols import "testing" diff --git a/yaegicore/symbols/time.go b/yaegicore/basesymbols/time.go similarity index 99% rename from yaegicore/symbols/time.go rename to yaegicore/basesymbols/time.go index 9bba72045c..800cd255e3 100644 --- a/yaegicore/symbols/time.go +++ b/yaegicore/basesymbols/time.go @@ -3,7 +3,7 @@ //go:build go1.22 // +build go1.22 -package symbols +package basesymbols import ( "go/constant" diff --git a/yaegicore/symbols/cogentcore_org-core-colors-gradient.go b/yaegicore/coresymbols/cogentcore_org-core-colors-gradient.go similarity index 99% rename from yaegicore/symbols/cogentcore_org-core-colors-gradient.go rename to yaegicore/coresymbols/cogentcore_org-core-colors-gradient.go index 7bb7c80348..ef5f5d231b 100644 --- a/yaegicore/symbols/cogentcore_org-core-colors-gradient.go +++ b/yaegicore/coresymbols/cogentcore_org-core-colors-gradient.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/colors/gradient'. DO NOT EDIT. -package symbols +package coresymbols import ( "cogentcore.org/core/colors/gradient" diff --git a/yaegicore/symbols/cogentcore_org-core-colors.go b/yaegicore/coresymbols/cogentcore_org-core-colors.go similarity index 99% rename from yaegicore/symbols/cogentcore_org-core-colors.go rename to yaegicore/coresymbols/cogentcore_org-core-colors.go index c8915bb4e8..e5a86c2907 100644 --- a/yaegicore/symbols/cogentcore_org-core-colors.go +++ b/yaegicore/coresymbols/cogentcore_org-core-colors.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/colors'. DO NOT EDIT. -package symbols +package coresymbols import ( "cogentcore.org/core/colors" diff --git a/yaegicore/symbols/cogentcore_org-core-core.go b/yaegicore/coresymbols/cogentcore_org-core-core.go similarity index 99% rename from yaegicore/symbols/cogentcore_org-core-core.go rename to yaegicore/coresymbols/cogentcore_org-core-core.go index c025a127fb..d2da78e8b2 100644 --- a/yaegicore/symbols/cogentcore_org-core-core.go +++ b/yaegicore/coresymbols/cogentcore_org-core-core.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/core'. DO NOT EDIT. 
-package symbols +package coresymbols import ( "cogentcore.org/core/base/fileinfo/mimedata" @@ -305,6 +305,7 @@ func init() { "SystemSettingsData": reflect.ValueOf((*core.SystemSettingsData)(nil)), "Tab": reflect.ValueOf((*core.Tab)(nil)), "TabTypes": reflect.ValueOf((*core.TabTypes)(nil)), + "Tabber": reflect.ValueOf((*core.Tabber)(nil)), "Table": reflect.ValueOf((*core.Table)(nil)), "TableStyler": reflect.ValueOf((*core.TableStyler)(nil)), "Tabs": reflect.ValueOf((*core.Tabs)(nil)), @@ -340,6 +341,7 @@ func init() { "_SettingsOpener": reflect.ValueOf((*_cogentcore_org_core_core_SettingsOpener)(nil)), "_SettingsSaver": reflect.ValueOf((*_cogentcore_org_core_core_SettingsSaver)(nil)), "_ShouldDisplayer": reflect.ValueOf((*_cogentcore_org_core_core_ShouldDisplayer)(nil)), + "_Tabber": reflect.ValueOf((*_cogentcore_org_core_core_Tabber)(nil)), "_TextFieldEmbedder": reflect.ValueOf((*_cogentcore_org_core_core_TextFieldEmbedder)(nil)), "_ToolbarMaker": reflect.ValueOf((*_cogentcore_org_core_core_ToolbarMaker)(nil)), "_Treer": reflect.ValueOf((*_cogentcore_org_core_core_Treer)(nil)), @@ -599,6 +601,14 @@ func (W _cogentcore_org_core_core_ShouldDisplayer) ShouldDisplay(field string) b return W.WShouldDisplay(field) } +// _cogentcore_org_core_core_Tabber is an interface wrapper for Tabber type +type _cogentcore_org_core_core_Tabber struct { + IValue interface{} + WAsCoreTabs func() *core.Tabs +} + +func (W _cogentcore_org_core_core_Tabber) AsCoreTabs() *core.Tabs { return W.WAsCoreTabs() } + // _cogentcore_org_core_core_TextFieldEmbedder is an interface wrapper for TextFieldEmbedder type type _cogentcore_org_core_core_TextFieldEmbedder struct { IValue interface{} diff --git a/yaegicore/symbols/cogentcore_org-core-events.go b/yaegicore/coresymbols/cogentcore_org-core-events.go similarity index 99% rename from yaegicore/symbols/cogentcore_org-core-events.go rename to yaegicore/coresymbols/cogentcore_org-core-events.go index 6ed7d5a977..46664ff0eb 100644 --- a/yaegicore/symbols/cogentcore_org-core-events.go +++ b/yaegicore/coresymbols/cogentcore_org-core-events.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/events'. DO NOT EDIT. -package symbols +package coresymbols import ( "cogentcore.org/core/enums" diff --git a/yaegicore/symbols/cogentcore_org-core-filetree.go b/yaegicore/coresymbols/cogentcore_org-core-filetree.go similarity index 93% rename from yaegicore/symbols/cogentcore_org-core-filetree.go rename to yaegicore/coresymbols/cogentcore_org-core-filetree.go index 442548dcc5..3a8ffd89be 100644 --- a/yaegicore/symbols/cogentcore_org-core-filetree.go +++ b/yaegicore/coresymbols/cogentcore_org-core-filetree.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/filetree'. DO NOT EDIT. 
-package symbols +package coresymbols import ( "cogentcore.org/core/base/fileinfo/mimedata" @@ -18,6 +18,7 @@ func init() { Symbols["cogentcore.org/core/filetree/filetree"] = map[string]reflect.Value{ // function, constant and variable definitions "AsNode": reflect.ValueOf(filetree.AsNode), + "AsTree": reflect.ValueOf(filetree.AsTree), "FindLocationAll": reflect.ValueOf(filetree.FindLocationAll), "FindLocationDir": reflect.ValueOf(filetree.FindLocationDir), "FindLocationFile": reflect.ValueOf(filetree.FindLocationFile), @@ -41,11 +42,13 @@ func init() { "NodeNameCount": reflect.ValueOf((*filetree.NodeNameCount)(nil)), "SearchResults": reflect.ValueOf((*filetree.SearchResults)(nil)), "Tree": reflect.ValueOf((*filetree.Tree)(nil)), + "Treer": reflect.ValueOf((*filetree.Treer)(nil)), "VCSLog": reflect.ValueOf((*filetree.VCSLog)(nil)), // interface wrapper definitions "_Filer": reflect.ValueOf((*_cogentcore_org_core_filetree_Filer)(nil)), "_NodeEmbedder": reflect.ValueOf((*_cogentcore_org_core_filetree_NodeEmbedder)(nil)), + "_Treer": reflect.ValueOf((*_cogentcore_org_core_filetree_Treer)(nil)), } } @@ -142,3 +145,11 @@ type _cogentcore_org_core_filetree_NodeEmbedder struct { } func (W _cogentcore_org_core_filetree_NodeEmbedder) AsNode() *filetree.Node { return W.WAsNode() } + +// _cogentcore_org_core_filetree_Treer is an interface wrapper for Treer type +type _cogentcore_org_core_filetree_Treer struct { + IValue interface{} + WAsFileTree func() *filetree.Tree +} + +func (W _cogentcore_org_core_filetree_Treer) AsFileTree() *filetree.Tree { return W.WAsFileTree() } diff --git a/yaegicore/symbols/cogentcore_org-core-htmlcore.go b/yaegicore/coresymbols/cogentcore_org-core-htmlcore.go similarity index 98% rename from yaegicore/symbols/cogentcore_org-core-htmlcore.go rename to yaegicore/coresymbols/cogentcore_org-core-htmlcore.go index 735ed97f65..f83b46d461 100644 --- a/yaegicore/symbols/cogentcore_org-core-htmlcore.go +++ b/yaegicore/coresymbols/cogentcore_org-core-htmlcore.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/htmlcore'. DO NOT EDIT. -package symbols +package coresymbols import ( "cogentcore.org/core/htmlcore" diff --git a/yaegicore/symbols/cogentcore_org-core-icons.go b/yaegicore/coresymbols/cogentcore_org-core-icons.go similarity index 99% rename from yaegicore/symbols/cogentcore_org-core-icons.go rename to yaegicore/coresymbols/cogentcore_org-core-icons.go index 5df0e73509..8b66e0e825 100644 --- a/yaegicore/symbols/cogentcore_org-core-icons.go +++ b/yaegicore/coresymbols/cogentcore_org-core-icons.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/icons'. DO NOT EDIT. -package symbols +package coresymbols import ( "cogentcore.org/core/icons" diff --git a/yaegicore/symbols/cogentcore_org-core-keymap.go b/yaegicore/coresymbols/cogentcore_org-core-keymap.go similarity index 99% rename from yaegicore/symbols/cogentcore_org-core-keymap.go rename to yaegicore/coresymbols/cogentcore_org-core-keymap.go index 4370b3a1e4..76c6579578 100644 --- a/yaegicore/symbols/cogentcore_org-core-keymap.go +++ b/yaegicore/coresymbols/cogentcore_org-core-keymap.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/keymap'. DO NOT EDIT. 
-package symbols +package coresymbols import ( "cogentcore.org/core/keymap" diff --git a/yaegicore/symbols/cogentcore_org-core-pages.go b/yaegicore/coresymbols/cogentcore_org-core-pages.go similarity index 96% rename from yaegicore/symbols/cogentcore_org-core-pages.go rename to yaegicore/coresymbols/cogentcore_org-core-pages.go index d579f118d9..edfc8a4d0a 100644 --- a/yaegicore/symbols/cogentcore_org-core-pages.go +++ b/yaegicore/coresymbols/cogentcore_org-core-pages.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/pages'. DO NOT EDIT. -package symbols +package coresymbols import ( "cogentcore.org/core/pages" diff --git a/yaegicore/symbols/cogentcore_org-core-paint.go b/yaegicore/coresymbols/cogentcore_org-core-paint.go similarity index 99% rename from yaegicore/symbols/cogentcore_org-core-paint.go rename to yaegicore/coresymbols/cogentcore_org-core-paint.go index be313e8f6b..9d9b5613d6 100644 --- a/yaegicore/symbols/cogentcore_org-core-paint.go +++ b/yaegicore/coresymbols/cogentcore_org-core-paint.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/paint'. DO NOT EDIT. -package symbols +package coresymbols import ( "cogentcore.org/core/paint" diff --git a/yaegicore/symbols/cogentcore_org-core-styles-abilities.go b/yaegicore/coresymbols/cogentcore_org-core-styles-abilities.go similarity index 98% rename from yaegicore/symbols/cogentcore_org-core-styles-abilities.go rename to yaegicore/coresymbols/cogentcore_org-core-styles-abilities.go index b573303275..c1219fb937 100644 --- a/yaegicore/symbols/cogentcore_org-core-styles-abilities.go +++ b/yaegicore/coresymbols/cogentcore_org-core-styles-abilities.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/styles/abilities'. DO NOT EDIT. -package symbols +package coresymbols import ( "cogentcore.org/core/styles/abilities" diff --git a/yaegicore/symbols/cogentcore_org-core-styles-states.go b/yaegicore/coresymbols/cogentcore_org-core-styles-states.go similarity index 98% rename from yaegicore/symbols/cogentcore_org-core-styles-states.go rename to yaegicore/coresymbols/cogentcore_org-core-styles-states.go index 8c475bddc4..e8c265b692 100644 --- a/yaegicore/symbols/cogentcore_org-core-styles-states.go +++ b/yaegicore/coresymbols/cogentcore_org-core-styles-states.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/styles/states'. DO NOT EDIT. -package symbols +package coresymbols import ( "cogentcore.org/core/styles/states" diff --git a/yaegicore/symbols/cogentcore_org-core-styles-units.go b/yaegicore/coresymbols/cogentcore_org-core-styles-units.go similarity index 99% rename from yaegicore/symbols/cogentcore_org-core-styles-units.go rename to yaegicore/coresymbols/cogentcore_org-core-styles-units.go index 48a192d11f..4af7681ff1 100644 --- a/yaegicore/symbols/cogentcore_org-core-styles-units.go +++ b/yaegicore/coresymbols/cogentcore_org-core-styles-units.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/styles/units'. DO NOT EDIT. 
-package symbols +package coresymbols import ( "cogentcore.org/core/styles/units" diff --git a/yaegicore/symbols/cogentcore_org-core-styles.go b/yaegicore/coresymbols/cogentcore_org-core-styles.go similarity index 99% rename from yaegicore/symbols/cogentcore_org-core-styles.go rename to yaegicore/coresymbols/cogentcore_org-core-styles.go index 5a280593c4..e3d5021b7a 100644 --- a/yaegicore/symbols/cogentcore_org-core-styles.go +++ b/yaegicore/coresymbols/cogentcore_org-core-styles.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/styles'. DO NOT EDIT. -package symbols +package coresymbols import ( "cogentcore.org/core/styles" diff --git a/yaegicore/symbols/cogentcore_org-core-texteditor.go b/yaegicore/coresymbols/cogentcore_org-core-texteditor.go similarity index 99% rename from yaegicore/symbols/cogentcore_org-core-texteditor.go rename to yaegicore/coresymbols/cogentcore_org-core-texteditor.go index 9cc548bb8f..be2381f802 100644 --- a/yaegicore/symbols/cogentcore_org-core-texteditor.go +++ b/yaegicore/coresymbols/cogentcore_org-core-texteditor.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/texteditor'. DO NOT EDIT. -package symbols +package coresymbols import ( "cogentcore.org/core/texteditor" diff --git a/yaegicore/symbols/cogentcore_org-core-tree.go b/yaegicore/coresymbols/cogentcore_org-core-tree.go similarity index 99% rename from yaegicore/symbols/cogentcore_org-core-tree.go rename to yaegicore/coresymbols/cogentcore_org-core-tree.go index 91549f43f6..644ee9b051 100644 --- a/yaegicore/symbols/cogentcore_org-core-tree.go +++ b/yaegicore/coresymbols/cogentcore_org-core-tree.go @@ -1,6 +1,6 @@ // Code generated by 'yaegi extract cogentcore.org/core/tree'. DO NOT EDIT. -package symbols +package coresymbols import ( "cogentcore.org/core/tree" diff --git a/yaegicore/symbols/image-color.go b/yaegicore/coresymbols/image-color.go similarity index 99% rename from yaegicore/symbols/image-color.go rename to yaegicore/coresymbols/image-color.go index 1d4ae63d68..eaf0647e2e 100644 --- a/yaegicore/symbols/image-color.go +++ b/yaegicore/coresymbols/image-color.go @@ -3,7 +3,7 @@ //go:build go1.22 // +build go1.22 -package symbols +package coresymbols import ( "image/color" diff --git a/yaegicore/symbols/image-draw.go b/yaegicore/coresymbols/image-draw.go similarity index 99% rename from yaegicore/symbols/image-draw.go rename to yaegicore/coresymbols/image-draw.go index 1e1385c333..9e9a7fe4a9 100644 --- a/yaegicore/symbols/image-draw.go +++ b/yaegicore/coresymbols/image-draw.go @@ -3,7 +3,7 @@ //go:build go1.22 // +build go1.22 -package symbols +package coresymbols import ( "image" diff --git a/yaegicore/symbols/image.go b/yaegicore/coresymbols/image.go similarity index 99% rename from yaegicore/symbols/image.go rename to yaegicore/coresymbols/image.go index bd71dd8407..4f829034f2 100644 --- a/yaegicore/symbols/image.go +++ b/yaegicore/coresymbols/image.go @@ -3,7 +3,7 @@ //go:build go1.22 // +build go1.22 -package symbols +package coresymbols import ( "image" diff --git a/yaegicore/coresymbols/make b/yaegicore/coresymbols/make new file mode 100755 index 0000000000..e9d39184d3 --- /dev/null +++ b/yaegicore/coresymbols/make @@ -0,0 +1,12 @@ +#!/usr/bin/env goal + +command extract { + for _, pkg := range args { + yaegi extract {"cogentcore.org/core/"+pkg} + } +} + +yaegi extract image image/color image/draw + +extract core icons events styles styles/states styles/abilities styles/units tree keymap colors colors/gradient filetree texteditor htmlcore 
pages paint + diff --git a/yaegicore/symbols/symbols.go b/yaegicore/coresymbols/symbols.go similarity index 75% rename from yaegicore/symbols/symbols.go rename to yaegicore/coresymbols/symbols.go index 527a046cfc..56d890e469 100644 --- a/yaegicore/symbols/symbols.go +++ b/yaegicore/coresymbols/symbols.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package symbols contains yaegi symbols for core packages. -package symbols +// Package coresymbols contains yaegi symbols for core packages. +package coresymbols //go:generate ./make diff --git a/shell/interpreter/imports_test.go b/yaegicore/coresymbols/symbols_test.go similarity index 91% rename from shell/interpreter/imports_test.go rename to yaegicore/coresymbols/symbols_test.go index bcd4b74d81..9df410a0c2 100644 --- a/shell/interpreter/imports_test.go +++ b/yaegicore/coresymbols/symbols_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package interpreter +package coresymbols import "testing" diff --git a/yaegicore/symbols/cogentcore_org-core-plot-plotcore.go b/yaegicore/symbols/cogentcore_org-core-plot-plotcore.go deleted file mode 100644 index 41c461e613..0000000000 --- a/yaegicore/symbols/cogentcore_org-core-plot-plotcore.go +++ /dev/null @@ -1,34 +0,0 @@ -// Code generated by 'yaegi extract cogentcore.org/core/plot/plotcore'. DO NOT EDIT. - -package symbols - -import ( - "cogentcore.org/core/plot/plotcore" - "reflect" -) - -func init() { - Symbols["cogentcore.org/core/plot/plotcore/plotcore"] = map[string]reflect.Value{ - // function, constant and variable definitions - "Bar": reflect.ValueOf(plotcore.Bar), - "FixMax": reflect.ValueOf(plotcore.FixMax), - "FixMin": reflect.ValueOf(plotcore.FixMin), - "FloatMax": reflect.ValueOf(plotcore.FloatMax), - "FloatMin": reflect.ValueOf(plotcore.FloatMin), - "NewPlot": reflect.ValueOf(plotcore.NewPlot), - "NewPlotEditor": reflect.ValueOf(plotcore.NewPlotEditor), - "NewSubPlot": reflect.ValueOf(plotcore.NewSubPlot), - "Off": reflect.ValueOf(plotcore.Off), - "On": reflect.ValueOf(plotcore.On), - "PlotTypesN": reflect.ValueOf(plotcore.PlotTypesN), - "PlotTypesValues": reflect.ValueOf(plotcore.PlotTypesValues), - "XY": reflect.ValueOf(plotcore.XY), - - // type definitions - "ColumnOptions": reflect.ValueOf((*plotcore.ColumnOptions)(nil)), - "Plot": reflect.ValueOf((*plotcore.Plot)(nil)), - "PlotEditor": reflect.ValueOf((*plotcore.PlotEditor)(nil)), - "PlotOptions": reflect.ValueOf((*plotcore.PlotOptions)(nil)), - "PlotTypes": reflect.ValueOf((*plotcore.PlotTypes)(nil)), - } -} diff --git a/yaegicore/symbols/cogentcore_org-core-plot-plots.go b/yaegicore/symbols/cogentcore_org-core-plot-plots.go deleted file mode 100644 index 956dce326e..0000000000 --- a/yaegicore/symbols/cogentcore_org-core-plot-plots.go +++ /dev/null @@ -1,123 +0,0 @@ -// Code generated by 'yaegi extract cogentcore.org/core/plot/plots'. DO NOT EDIT. 
- -package symbols - -import ( - "cogentcore.org/core/plot/plots" - "reflect" -) - -func init() { - Symbols["cogentcore.org/core/plot/plots/plots"] = map[string]reflect.Value{ - // function, constant and variable definitions - "AddTableLine": reflect.ValueOf(plots.AddTableLine), - "AddTableLinePoints": reflect.ValueOf(plots.AddTableLinePoints), - "Box": reflect.ValueOf(plots.Box), - "Circle": reflect.ValueOf(plots.Circle), - "Cross": reflect.ValueOf(plots.Cross), - "DrawBox": reflect.ValueOf(plots.DrawBox), - "DrawCircle": reflect.ValueOf(plots.DrawCircle), - "DrawCross": reflect.ValueOf(plots.DrawCross), - "DrawPlus": reflect.ValueOf(plots.DrawPlus), - "DrawPyramid": reflect.ValueOf(plots.DrawPyramid), - "DrawRing": reflect.ValueOf(plots.DrawRing), - "DrawShape": reflect.ValueOf(plots.DrawShape), - "DrawSquare": reflect.ValueOf(plots.DrawSquare), - "DrawTriangle": reflect.ValueOf(plots.DrawTriangle), - "MidStep": reflect.ValueOf(plots.MidStep), - "NewBarChart": reflect.ValueOf(plots.NewBarChart), - "NewLabels": reflect.ValueOf(plots.NewLabels), - "NewLine": reflect.ValueOf(plots.NewLine), - "NewLinePoints": reflect.ValueOf(plots.NewLinePoints), - "NewScatter": reflect.ValueOf(plots.NewScatter), - "NewTableXYer": reflect.ValueOf(plots.NewTableXYer), - "NewXErrorBars": reflect.ValueOf(plots.NewXErrorBars), - "NewYErrorBars": reflect.ValueOf(plots.NewYErrorBars), - "NoStep": reflect.ValueOf(plots.NoStep), - "Plus": reflect.ValueOf(plots.Plus), - "PostStep": reflect.ValueOf(plots.PostStep), - "PreStep": reflect.ValueOf(plots.PreStep), - "Pyramid": reflect.ValueOf(plots.Pyramid), - "Ring": reflect.ValueOf(plots.Ring), - "ShapesN": reflect.ValueOf(plots.ShapesN), - "ShapesValues": reflect.ValueOf(plots.ShapesValues), - "Square": reflect.ValueOf(plots.Square), - "StepKindN": reflect.ValueOf(plots.StepKindN), - "StepKindValues": reflect.ValueOf(plots.StepKindValues), - "TableColumnIndex": reflect.ValueOf(plots.TableColumnIndex), - "Triangle": reflect.ValueOf(plots.Triangle), - - // type definitions - "BarChart": reflect.ValueOf((*plots.BarChart)(nil)), - "Errors": reflect.ValueOf((*plots.Errors)(nil)), - "Labels": reflect.ValueOf((*plots.Labels)(nil)), - "Line": reflect.ValueOf((*plots.Line)(nil)), - "Scatter": reflect.ValueOf((*plots.Scatter)(nil)), - "Shapes": reflect.ValueOf((*plots.Shapes)(nil)), - "StepKind": reflect.ValueOf((*plots.StepKind)(nil)), - "Table": reflect.ValueOf((*plots.Table)(nil)), - "TableXYer": reflect.ValueOf((*plots.TableXYer)(nil)), - "XErrorBars": reflect.ValueOf((*plots.XErrorBars)(nil)), - "XErrorer": reflect.ValueOf((*plots.XErrorer)(nil)), - "XErrors": reflect.ValueOf((*plots.XErrors)(nil)), - "XYLabeler": reflect.ValueOf((*plots.XYLabeler)(nil)), - "XYLabels": reflect.ValueOf((*plots.XYLabels)(nil)), - "YErrorBars": reflect.ValueOf((*plots.YErrorBars)(nil)), - "YErrorer": reflect.ValueOf((*plots.YErrorer)(nil)), - "YErrors": reflect.ValueOf((*plots.YErrors)(nil)), - - // interface wrapper definitions - "_Table": reflect.ValueOf((*_cogentcore_org_core_plot_plots_Table)(nil)), - "_XErrorer": reflect.ValueOf((*_cogentcore_org_core_plot_plots_XErrorer)(nil)), - "_XYLabeler": reflect.ValueOf((*_cogentcore_org_core_plot_plots_XYLabeler)(nil)), - "_YErrorer": reflect.ValueOf((*_cogentcore_org_core_plot_plots_YErrorer)(nil)), - } -} - -// _cogentcore_org_core_plot_plots_Table is an interface wrapper for Table type -type _cogentcore_org_core_plot_plots_Table struct { - IValue interface{} - WColumnName func(i int) string - WNumColumns func() int - WNumRows func() int - 
WPlotData func(column int, row int) float32 -} - -func (W _cogentcore_org_core_plot_plots_Table) ColumnName(i int) string { return W.WColumnName(i) } -func (W _cogentcore_org_core_plot_plots_Table) NumColumns() int { return W.WNumColumns() } -func (W _cogentcore_org_core_plot_plots_Table) NumRows() int { return W.WNumRows() } -func (W _cogentcore_org_core_plot_plots_Table) PlotData(column int, row int) float32 { - return W.WPlotData(column, row) -} - -// _cogentcore_org_core_plot_plots_XErrorer is an interface wrapper for XErrorer type -type _cogentcore_org_core_plot_plots_XErrorer struct { - IValue interface{} - WXError func(i int) (low float32, high float32) -} - -func (W _cogentcore_org_core_plot_plots_XErrorer) XError(i int) (low float32, high float32) { - return W.WXError(i) -} - -// _cogentcore_org_core_plot_plots_XYLabeler is an interface wrapper for XYLabeler type -type _cogentcore_org_core_plot_plots_XYLabeler struct { - IValue interface{} - WLabel func(i int) string - WLen func() int - WXY func(i int) (x float32, y float32) -} - -func (W _cogentcore_org_core_plot_plots_XYLabeler) Label(i int) string { return W.WLabel(i) } -func (W _cogentcore_org_core_plot_plots_XYLabeler) Len() int { return W.WLen() } -func (W _cogentcore_org_core_plot_plots_XYLabeler) XY(i int) (x float32, y float32) { return W.WXY(i) } - -// _cogentcore_org_core_plot_plots_YErrorer is an interface wrapper for YErrorer type -type _cogentcore_org_core_plot_plots_YErrorer struct { - IValue interface{} - WYError func(i int) (float32, float32) -} - -func (W _cogentcore_org_core_plot_plots_YErrorer) YError(i int) (float32, float32) { - return W.WYError(i) -} diff --git a/yaegicore/symbols/cogentcore_org-core-plot.go b/yaegicore/symbols/cogentcore_org-core-plot.go deleted file mode 100644 index 6e2b8b230f..0000000000 --- a/yaegicore/symbols/cogentcore_org-core-plot.go +++ /dev/null @@ -1,164 +0,0 @@ -// Code generated by 'yaegi extract cogentcore.org/core/plot'. DO NOT EDIT. 
- -package symbols - -import ( - "cogentcore.org/core/plot" - "reflect" -) - -func init() { - Symbols["cogentcore.org/core/plot/plot"] = map[string]reflect.Value{ - // function, constant and variable definitions - "CheckFloats": reflect.ValueOf(plot.CheckFloats), - "CheckNaNs": reflect.ValueOf(plot.CheckNaNs), - "CopyValues": reflect.ValueOf(plot.CopyValues), - "CopyXYZs": reflect.ValueOf(plot.CopyXYZs), - "CopyXYs": reflect.ValueOf(plot.CopyXYs), - "DefaultFontFamily": reflect.ValueOf(&plot.DefaultFontFamily).Elem(), - "ErrInfinity": reflect.ValueOf(&plot.ErrInfinity).Elem(), - "ErrNoData": reflect.ValueOf(&plot.ErrNoData).Elem(), - "New": reflect.ValueOf(plot.New), - "PlotXYs": reflect.ValueOf(plot.PlotXYs), - "Range": reflect.ValueOf(plot.Range), - "UTCUnixTime": reflect.ValueOf(&plot.UTCUnixTime).Elem(), - "UnixTimeIn": reflect.ValueOf(plot.UnixTimeIn), - "XYRange": reflect.ValueOf(plot.XYRange), - - // type definitions - "Axis": reflect.ValueOf((*plot.Axis)(nil)), - "ConstantTicks": reflect.ValueOf((*plot.ConstantTicks)(nil)), - "DataRanger": reflect.ValueOf((*plot.DataRanger)(nil)), - "DefaultTicks": reflect.ValueOf((*plot.DefaultTicks)(nil)), - "InvertedScale": reflect.ValueOf((*plot.InvertedScale)(nil)), - "Labeler": reflect.ValueOf((*plot.Labeler)(nil)), - "Legend": reflect.ValueOf((*plot.Legend)(nil)), - "LegendEntry": reflect.ValueOf((*plot.LegendEntry)(nil)), - "LegendPosition": reflect.ValueOf((*plot.LegendPosition)(nil)), - "LineStyle": reflect.ValueOf((*plot.LineStyle)(nil)), - "LinearScale": reflect.ValueOf((*plot.LinearScale)(nil)), - "LogScale": reflect.ValueOf((*plot.LogScale)(nil)), - "LogTicks": reflect.ValueOf((*plot.LogTicks)(nil)), - "Normalizer": reflect.ValueOf((*plot.Normalizer)(nil)), - "Plot": reflect.ValueOf((*plot.Plot)(nil)), - "Plotter": reflect.ValueOf((*plot.Plotter)(nil)), - "Text": reflect.ValueOf((*plot.Text)(nil)), - "TextStyle": reflect.ValueOf((*plot.TextStyle)(nil)), - "Thumbnailer": reflect.ValueOf((*plot.Thumbnailer)(nil)), - "Tick": reflect.ValueOf((*plot.Tick)(nil)), - "Ticker": reflect.ValueOf((*plot.Ticker)(nil)), - "TickerFunc": reflect.ValueOf((*plot.TickerFunc)(nil)), - "TimeTicks": reflect.ValueOf((*plot.TimeTicks)(nil)), - "Valuer": reflect.ValueOf((*plot.Valuer)(nil)), - "Values": reflect.ValueOf((*plot.Values)(nil)), - "XValues": reflect.ValueOf((*plot.XValues)(nil)), - "XYValues": reflect.ValueOf((*plot.XYValues)(nil)), - "XYZ": reflect.ValueOf((*plot.XYZ)(nil)), - "XYZer": reflect.ValueOf((*plot.XYZer)(nil)), - "XYZs": reflect.ValueOf((*plot.XYZs)(nil)), - "XYer": reflect.ValueOf((*plot.XYer)(nil)), - "XYs": reflect.ValueOf((*plot.XYs)(nil)), - "YValues": reflect.ValueOf((*plot.YValues)(nil)), - - // interface wrapper definitions - "_DataRanger": reflect.ValueOf((*_cogentcore_org_core_plot_DataRanger)(nil)), - "_Labeler": reflect.ValueOf((*_cogentcore_org_core_plot_Labeler)(nil)), - "_Normalizer": reflect.ValueOf((*_cogentcore_org_core_plot_Normalizer)(nil)), - "_Plotter": reflect.ValueOf((*_cogentcore_org_core_plot_Plotter)(nil)), - "_Thumbnailer": reflect.ValueOf((*_cogentcore_org_core_plot_Thumbnailer)(nil)), - "_Ticker": reflect.ValueOf((*_cogentcore_org_core_plot_Ticker)(nil)), - "_Valuer": reflect.ValueOf((*_cogentcore_org_core_plot_Valuer)(nil)), - "_XYZer": reflect.ValueOf((*_cogentcore_org_core_plot_XYZer)(nil)), - "_XYer": reflect.ValueOf((*_cogentcore_org_core_plot_XYer)(nil)), - } -} - -// _cogentcore_org_core_plot_DataRanger is an interface wrapper for DataRanger type -type _cogentcore_org_core_plot_DataRanger struct { - 
IValue interface{} - WDataRange func(pt *plot.Plot) (xmin float32, xmax float32, ymin float32, ymax float32) -} - -func (W _cogentcore_org_core_plot_DataRanger) DataRange(pt *plot.Plot) (xmin float32, xmax float32, ymin float32, ymax float32) { - return W.WDataRange(pt) -} - -// _cogentcore_org_core_plot_Labeler is an interface wrapper for Labeler type -type _cogentcore_org_core_plot_Labeler struct { - IValue interface{} - WLabel func(i int) string -} - -func (W _cogentcore_org_core_plot_Labeler) Label(i int) string { return W.WLabel(i) } - -// _cogentcore_org_core_plot_Normalizer is an interface wrapper for Normalizer type -type _cogentcore_org_core_plot_Normalizer struct { - IValue interface{} - WNormalize func(min float32, max float32, x float32) float32 -} - -func (W _cogentcore_org_core_plot_Normalizer) Normalize(min float32, max float32, x float32) float32 { - return W.WNormalize(min, max, x) -} - -// _cogentcore_org_core_plot_Plotter is an interface wrapper for Plotter type -type _cogentcore_org_core_plot_Plotter struct { - IValue interface{} - WPlot func(pt *plot.Plot) - WXYData func() (data plot.XYer, pixels plot.XYer) -} - -func (W _cogentcore_org_core_plot_Plotter) Plot(pt *plot.Plot) { W.WPlot(pt) } -func (W _cogentcore_org_core_plot_Plotter) XYData() (data plot.XYer, pixels plot.XYer) { - return W.WXYData() -} - -// _cogentcore_org_core_plot_Thumbnailer is an interface wrapper for Thumbnailer type -type _cogentcore_org_core_plot_Thumbnailer struct { - IValue interface{} - WThumbnail func(pt *plot.Plot) -} - -func (W _cogentcore_org_core_plot_Thumbnailer) Thumbnail(pt *plot.Plot) { W.WThumbnail(pt) } - -// _cogentcore_org_core_plot_Ticker is an interface wrapper for Ticker type -type _cogentcore_org_core_plot_Ticker struct { - IValue interface{} - WTicks func(min float32, max float32) []plot.Tick -} - -func (W _cogentcore_org_core_plot_Ticker) Ticks(min float32, max float32) []plot.Tick { - return W.WTicks(min, max) -} - -// _cogentcore_org_core_plot_Valuer is an interface wrapper for Valuer type -type _cogentcore_org_core_plot_Valuer struct { - IValue interface{} - WLen func() int - WValue func(i int) float32 -} - -func (W _cogentcore_org_core_plot_Valuer) Len() int { return W.WLen() } -func (W _cogentcore_org_core_plot_Valuer) Value(i int) float32 { return W.WValue(i) } - -// _cogentcore_org_core_plot_XYZer is an interface wrapper for XYZer type -type _cogentcore_org_core_plot_XYZer struct { - IValue interface{} - WLen func() int - WXY func(i int) (float32, float32) - WXYZ func(i int) (float32, float32, float32) -} - -func (W _cogentcore_org_core_plot_XYZer) Len() int { return W.WLen() } -func (W _cogentcore_org_core_plot_XYZer) XY(i int) (float32, float32) { return W.WXY(i) } -func (W _cogentcore_org_core_plot_XYZer) XYZ(i int) (float32, float32, float32) { return W.WXYZ(i) } - -// _cogentcore_org_core_plot_XYer is an interface wrapper for XYer type -type _cogentcore_org_core_plot_XYer struct { - IValue interface{} - WLen func() int - WXY func(i int) (x float32, y float32) -} - -func (W _cogentcore_org_core_plot_XYer) Len() int { return W.WLen() } -func (W _cogentcore_org_core_plot_XYer) XY(i int) (x float32, y float32) { return W.WXY(i) } diff --git a/yaegicore/symbols/cogentcore_org-core-tensor-table.go b/yaegicore/symbols/cogentcore_org-core-tensor-table.go deleted file mode 100644 index 1fd38dbb65..0000000000 --- a/yaegicore/symbols/cogentcore_org-core-tensor-table.go +++ /dev/null @@ -1,53 +0,0 @@ -// Code generated by 'yaegi extract cogentcore.org/core/tensor/table'. 
DO NOT EDIT. - -package symbols - -import ( - "cogentcore.org/core/tensor/table" - "reflect" -) - -func init() { - Symbols["cogentcore.org/core/tensor/table/table"] = map[string]reflect.Value{ - // function, constant and variable definitions - "AddAggName": reflect.ValueOf(table.AddAggName), - "Ascending": reflect.ValueOf(table.Ascending), - "ColumnNameOnly": reflect.ValueOf(table.ColumnNameOnly), - "Comma": reflect.ValueOf(table.Comma), - "ConfigFromDataValues": reflect.ValueOf(table.ConfigFromDataValues), - "ConfigFromHeaders": reflect.ValueOf(table.ConfigFromHeaders), - "ConfigFromTableHeaders": reflect.ValueOf(table.ConfigFromTableHeaders), - "Contains": reflect.ValueOf(table.Contains), - "DelimsN": reflect.ValueOf(table.DelimsN), - "DelimsValues": reflect.ValueOf(table.DelimsValues), - "Descending": reflect.ValueOf(table.Descending), - "Detect": reflect.ValueOf(table.Detect), - "DetectTableHeaders": reflect.ValueOf(table.DetectTableHeaders), - "Equals": reflect.ValueOf(table.Equals), - "Headers": reflect.ValueOf(table.Headers), - "IgnoreCase": reflect.ValueOf(table.IgnoreCase), - "InferDataType": reflect.ValueOf(table.InferDataType), - "NewIndexView": reflect.ValueOf(table.NewIndexView), - "NewSliceTable": reflect.ValueOf(table.NewSliceTable), - "NewTable": reflect.ValueOf(table.NewTable), - "NoHeaders": reflect.ValueOf(table.NoHeaders), - "ShapeFromString": reflect.ValueOf(table.ShapeFromString), - "Space": reflect.ValueOf(table.Space), - "Tab": reflect.ValueOf(table.Tab), - "TableColumnType": reflect.ValueOf(table.TableColumnType), - "TableHeaderChar": reflect.ValueOf(table.TableHeaderChar), - "TableHeaderToType": reflect.ValueOf(&table.TableHeaderToType).Elem(), - "UpdateSliceTable": reflect.ValueOf(table.UpdateSliceTable), - "UseCase": reflect.ValueOf(table.UseCase), - - // type definitions - "Delims": reflect.ValueOf((*table.Delims)(nil)), - "Filterer": reflect.ValueOf((*table.Filterer)(nil)), - "IndexView": reflect.ValueOf((*table.IndexView)(nil)), - "LessFunc": reflect.ValueOf((*table.LessFunc)(nil)), - "SplitAgg": reflect.ValueOf((*table.SplitAgg)(nil)), - "Splits": reflect.ValueOf((*table.Splits)(nil)), - "SplitsLessFunc": reflect.ValueOf((*table.SplitsLessFunc)(nil)), - "Table": reflect.ValueOf((*table.Table)(nil)), - } -} diff --git a/yaegicore/symbols/make b/yaegicore/symbols/make deleted file mode 100755 index 67bc088dec..0000000000 --- a/yaegicore/symbols/make +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env cosh - -yaegi extract fmt strconv strings image image/color image/draw time log/slog reflect - -command extract { - for _, pkg := range args { - yaegi extract {"cogentcore.org/core/"+pkg} - } -} - -extract core icons events styles styles/states styles/abilities styles/units tree keymap colors colors/gradient filetree texteditor htmlcore pages paint math32 plot plot/plots plot/plotcore tensor/table base/errors base/fsx base/reflectx base/labels base/fileinfo diff --git a/yaegicore/yaegicore.go b/yaegicore/yaegicore.go index 24178dbba9..559b76be49 100644 --- a/yaegicore/yaegicore.go +++ b/yaegicore/yaegicore.go @@ -17,7 +17,8 @@ import ( "cogentcore.org/core/events" "cogentcore.org/core/htmlcore" "cogentcore.org/core/texteditor" - "cogentcore.org/core/yaegicore/symbols" + "cogentcore.org/core/yaegicore/basesymbols" + "cogentcore.org/core/yaegicore/coresymbols" "github.com/cogentcore/yaegi/interp" ) @@ -25,7 +26,8 @@ var autoPlanNameCounter uint64 func init() { htmlcore.BindTextEditor = BindTextEditor - symbols.Symbols["."] = map[string]reflect.Value{} // make "." 
available for use + coresymbols.Symbols["."] = map[string]reflect.Value{} // make "." available for use + basesymbols.Symbols["."] = map[string]reflect.Value{} // make "." available for use } // BindTextEditor binds the given text editor to a yaegi interpreter @@ -36,12 +38,13 @@ func BindTextEditor(ed *texteditor.Editor, parent core.Widget) { oc := func() { in := interp.New(interp.Options{}) core.ExternalParent = parent - symbols.Symbols["."]["b"] = reflect.ValueOf(parent) + coresymbols.Symbols["."]["b"] = reflect.ValueOf(parent) // the normal AutoPlanName cannot be used because the stack trace in yaegi is not helpful - symbols.Symbols["cogentcore.org/core/tree/tree"]["AutoPlanName"] = reflect.ValueOf(func(int) string { + coresymbols.Symbols["cogentcore.org/core/tree/tree"]["AutoPlanName"] = reflect.ValueOf(func(int) string { return fmt.Sprintf("yaegi-%v", atomic.AddUint64(&autoPlanNameCounter, 1)) }) - errors.Log(in.Use(symbols.Symbols)) + errors.Log(in.Use(basesymbols.Symbols)) + errors.Log(in.Use(coresymbols.Symbols)) in.ImportUsed() parent.AsTree().DeleteChildren()