summaryrefslogtreecommitdiff
path: root/internal
diff options
context:
space:
mode:
authorDave Henderson <dhenderson@gmail.com>2024-01-22 09:06:33 -0500
committerGitHub <noreply@github.com>2024-01-22 09:06:33 -0500
commit0ac3aa24bf2e4ada9c26fd9ef5b7f0ae8c6b6cfb (patch)
tree9a95f27eec1e77ef8bfefcb2810f7e41681627a5 /internal
parentf837061f953bda1e8b42095c6dba0496de11d993 (diff)
Use go-fsimpl to read from datasources (#1336)
* Use go-fsimpl to read from datasources Signed-off-by: Dave Henderson <dhenderson@gmail.com> * trying to fix windows bug Signed-off-by: Dave Henderson <dhenderson@gmail.com> * attempts to fix some of the path madness Signed-off-by: Dave Henderson <dhenderson@gmail.com> * remove 'HOME' from expected env vars Signed-off-by: Dave Henderson <dhenderson@gmail.com> * more tweaks Signed-off-by: Dave Henderson <dhenderson@gmail.com> * lint fix Signed-off-by: Dave Henderson <dhenderson@gmail.com> --------- Signed-off-by: Dave Henderson <dhenderson@gmail.com>
Diffstat (limited to 'internal')
-rw-r--r--internal/cmd/config.go2
-rw-r--r--internal/cmd/main.go10
-rw-r--r--internal/config/configfile.go6
-rw-r--r--internal/config/types.go6
-rw-r--r--internal/datafs/context.go45
-rw-r--r--internal/datafs/envfs.go221
-rw-r--r--internal/datafs/envfs_test.go140
-rw-r--r--internal/datafs/fsurl.go42
-rw-r--r--internal/datafs/fsurl_test.go106
-rw-r--r--internal/datafs/fsys.go98
-rw-r--r--internal/datafs/fsys_test.go98
-rw-r--r--internal/datafs/getenv.go51
-rw-r--r--internal/datafs/getenv_test.go97
-rw-r--r--internal/datafs/mergefs.go280
-rw-r--r--internal/datafs/mergefs_test.go358
-rw-r--r--internal/datafs/stdinfs.go110
-rw-r--r--internal/datafs/stdinfs_test.go109
-rw-r--r--internal/datafs/vaultauth.go89
-rw-r--r--internal/datafs/wdfs.go100
-rw-r--r--internal/datafs/wdfs_test.go35
-rw-r--r--internal/deprecated/deprecated.go7
-rw-r--r--internal/iohelpers/mimetypes.go33
-rw-r--r--internal/iohelpers/write_test.go71
-rw-r--r--internal/iohelpers/writers_test.go86
-rw-r--r--internal/parsers/parsefuncs.go528
-rw-r--r--internal/parsers/parsefuncs_test.go777
-rw-r--r--internal/parsers/parser.go39
-rw-r--r--internal/tests/integration/basic_test.go27
-rw-r--r--internal/tests/integration/datasources_consul_test.go19
-rw-r--r--internal/tests/integration/datasources_file_test.go3
-rw-r--r--internal/tests/integration/datasources_git_test.go4
-rw-r--r--internal/tests/integration/datasources_vault_ec2_test.go9
-rw-r--r--internal/tests/integration/datasources_vault_test.go37
-rw-r--r--internal/tests/integration/integration_test.go4
-rw-r--r--internal/urlhelpers/urlhelpers.go46
-rw-r--r--internal/urlhelpers/urlhelpers_test.go42
36 files changed, 3506 insertions, 229 deletions
diff --git a/internal/cmd/config.go b/internal/cmd/config.go
index 7f071417..7dc53911 100644
--- a/internal/cmd/config.go
+++ b/internal/cmd/config.go
@@ -73,7 +73,7 @@ func readConfigFile(ctx context.Context, cmd *cobra.Command) (cfg *config.Config
// we only support loading configs from the local filesystem for now
fsys, err := datafs.FSysForPath(ctx, cfgFile)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("fsys for path %v: %w", cfgFile, err)
}
f, err := fsys.Open(cfgFile)
diff --git a/internal/cmd/main.go b/internal/cmd/main.go
index 3cce83e2..7a3dea98 100644
--- a/internal/cmd/main.go
+++ b/internal/cmd/main.go
@@ -7,7 +7,6 @@ import (
"os/exec"
"os/signal"
- "github.com/hairyhenderson/go-fsimpl"
"github.com/hairyhenderson/gomplate/v4"
"github.com/hairyhenderson/gomplate/v4/env"
"github.com/hairyhenderson/gomplate/v4/internal/datafs"
@@ -169,13 +168,10 @@ func InitFlags(command *cobra.Command) {
func Main(ctx context.Context, args []string, stdin io.Reader, stdout, stderr io.Writer) error {
ctx = initLogger(ctx, stderr)
- // inject a default filesystem provider for file:// URLs
+ // inject default filesystem provider if it hasn't already been provided in
+ // the context
if datafs.FSProviderFromContext(ctx) == nil {
- // TODO: expand this to support other schemes!
- mux := fsimpl.NewMux()
- mux.Add(datafs.WdFS)
-
- ctx = datafs.ContextWithFSProvider(ctx, mux)
+ ctx = datafs.ContextWithFSProvider(ctx, gomplate.DefaultFSProvider)
}
command := NewGomplateCmd()
diff --git a/internal/config/configfile.go b/internal/config/configfile.go
index 1ed266b3..1fcccc20 100644
--- a/internal/config/configfile.go
+++ b/internal/config/configfile.go
@@ -15,8 +15,8 @@ import (
"golang.org/x/exp/slices"
- "github.com/hairyhenderson/gomplate/v4/internal/datafs"
"github.com/hairyhenderson/gomplate/v4/internal/iohelpers"
+ "github.com/hairyhenderson/gomplate/v4/internal/urlhelpers"
"github.com/hairyhenderson/yaml"
)
@@ -115,7 +115,7 @@ func (d *DataSource) UnmarshalYAML(value *yaml.Node) error {
if err != nil {
return err
}
- u, err := datafs.ParseSourceURL(r.URL)
+ u, err := urlhelpers.ParseSourceURL(r.URL)
if err != nil {
return fmt.Errorf("could not parse datasource URL %q: %w", r.URL, err)
}
@@ -378,7 +378,7 @@ func parseDatasourceArg(value string) (alias string, ds DataSource, err error) {
}
}
- ds.URL, err = datafs.ParseSourceURL(u)
+ ds.URL, err = urlhelpers.ParseSourceURL(u)
return alias, ds, err
}
diff --git a/internal/config/types.go b/internal/config/types.go
index 57901f0d..648ad2b9 100644
--- a/internal/config/types.go
+++ b/internal/config/types.go
@@ -5,7 +5,7 @@ import (
"net/http"
"strings"
- "github.com/hairyhenderson/gomplate/v4/internal/datafs"
+ "github.com/hairyhenderson/gomplate/v4/internal/urlhelpers"
"github.com/hairyhenderson/yaml"
)
@@ -53,7 +53,7 @@ func (t *Templates) unmarshalYAMLArray(value *yaml.Node) error {
pth = alias
}
- u, err := datafs.ParseSourceURL(pth)
+ u, err := urlhelpers.ParseSourceURL(pth)
if err != nil {
return fmt.Errorf("could not parse template URL %q: %w", pth, err)
}
@@ -90,7 +90,7 @@ func parseTemplateArg(value string) (alias string, ds DataSource, err error) {
u = alias
}
- ds.URL, err = datafs.ParseSourceURL(u)
+ ds.URL, err = urlhelpers.ParseSourceURL(u)
return alias, ds, err
}
diff --git a/internal/datafs/context.go b/internal/datafs/context.go
new file mode 100644
index 00000000..7f1235bf
--- /dev/null
+++ b/internal/datafs/context.go
@@ -0,0 +1,45 @@
+package datafs
+
+import (
+ "context"
+ "io"
+ "io/fs"
+ "os"
+
+ "github.com/hairyhenderson/gomplate/v4/internal/config"
+)
+
+// withContexter is an fs.FS that can be configured with a custom context
+// copied from go-fsimpl - see internal/types.go
+type withContexter interface {
+ WithContext(ctx context.Context) fs.FS
+}
+
+type withDataSourceser interface {
+ WithDataSources(sources map[string]config.DataSource) fs.FS
+}
+
+// WithDataSourcesFS injects a datasource map into the filesystem fs, if the
+// filesystem supports it (i.e. has a WithDataSources method). This is used for
+// the mergefs filesystem.
+func WithDataSourcesFS(sources map[string]config.DataSource, fsys fs.FS) fs.FS {
+ if fsys, ok := fsys.(withDataSourceser); ok {
+ return fsys.WithDataSources(sources)
+ }
+
+ return fsys
+}
+
+type stdinCtxKey struct{}
+
+func ContextWithStdin(ctx context.Context, r io.Reader) context.Context {
+ return context.WithValue(ctx, stdinCtxKey{}, r)
+}
+
+func StdinFromContext(ctx context.Context) io.Reader {
+ if r, ok := ctx.Value(stdinCtxKey{}).(io.Reader); ok {
+ return r
+ }
+
+ return os.Stdin
+}
diff --git a/internal/datafs/envfs.go b/internal/datafs/envfs.go
new file mode 100644
index 00000000..41b3928f
--- /dev/null
+++ b/internal/datafs/envfs.go
@@ -0,0 +1,221 @@
+package datafs
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/fs"
+ "net/url"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/hairyhenderson/go-fsimpl"
+)
+
+// NewEnvFS returns a filesystem (an fs.FS) that can be used to read data from
+// environment variables.
+func NewEnvFS(_ *url.URL) (fs.FS, error) {
+ return &envFS{locfs: os.DirFS("/")}, nil
+}
+
+type envFS struct {
+ locfs fs.FS
+}
+
+//nolint:gochecknoglobals
+var EnvFS = fsimpl.FSProviderFunc(NewEnvFS, "env")
+
+var _ fs.FS = (*envFS)(nil)
+
+func (f *envFS) Open(name string) (fs.File, error) {
+ if !fs.ValidPath(name) {
+ return nil, &fs.PathError{
+ Op: "open",
+ Path: name,
+ Err: fs.ErrInvalid,
+ }
+ }
+
+ return &envFile{locfs: f.locfs, name: name}, nil
+}
+
+type envFile struct {
+ locfs fs.FS
+ body io.Reader
+ name string
+
+ dirents []fs.DirEntry
+ diroff int
+}
+
+var (
+ _ fs.File = (*envFile)(nil)
+ _ fs.ReadDirFile = (*envFile)(nil)
+)
+
+// overridable env functions
+var (
+ lookupEnv = os.LookupEnv
+ environ = os.Environ
+)
+
+func (e *envFile) Close() error {
+ e.body = nil
+ return nil
+}
+
+func (e *envFile) envReader() (int, io.Reader, error) {
+ v, found := lookupEnv(e.name)
+ if found {
+ return len(v), bytes.NewBufferString(v), nil
+ }
+
+ fname, found := lookupEnv(e.name + "_FILE")
+ if found && fname != "" {
+ fname = strings.TrimPrefix(fname, "/")
+
+ b, err := fs.ReadFile(e.locfs, fname)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ b = bytes.TrimSpace(b)
+
+ return len(b), bytes.NewBuffer(b), nil
+ }
+
+ return 0, nil, fs.ErrNotExist
+}
+
+func (e *envFile) Stat() (fs.FileInfo, error) {
+ n, _, err := e.envReader()
+ if err != nil {
+ return nil, err
+ }
+
+ return FileInfo(e.name, int64(n), 0o444, time.Time{}, ""), nil
+}
+
+func (e *envFile) Read(p []byte) (int, error) {
+ if e.body == nil {
+ _, r, err := e.envReader()
+ if err != nil {
+ return 0, err
+ }
+ e.body = r
+ }
+
+ return e.body.Read(p)
+}
+
+func (e *envFile) ReadDir(n int) ([]fs.DirEntry, error) {
+ // envFS has no concept of subdirectories, but we can support a root
+ // directory by listing all environment variables.
+ if e.name != "." {
+ return nil, fmt.Errorf("%s: not a directory", e.name)
+ }
+
+ if e.dirents == nil {
+ envs := environ()
+ e.dirents = make([]fs.DirEntry, 0, len(envs))
+ for _, env := range envs {
+ parts := strings.SplitN(env, "=", 2)
+ name, value := parts[0], parts[1]
+
+ if name == "" {
+ // this might be a Windows =C: style env var, so skip it
+ continue
+ }
+
+ e.dirents = append(e.dirents, FileInfoDirEntry(
+ FileInfo(name, int64(len(value)), 0o444, time.Time{}, ""),
+ ))
+ }
+ }
+
+ if n > 0 && e.diroff >= len(e.dirents) {
+ return nil, io.EOF
+ }
+
+ low := e.diroff
+ high := e.diroff + n
+
+ // clamp high at the max, and ensure it's higher than low
+ if high >= len(e.dirents) || high <= low {
+ high = len(e.dirents)
+ }
+
+ entries := make([]fs.DirEntry, high-low)
+ copy(entries, e.dirents[e.diroff:])
+
+ e.diroff = high
+
+ return entries, nil
+}
+
+// FileInfo/DirInfo/FileInfoDirEntry/etc are taken from go-fsimpl's internal
+// package, and may be exported in the future...
+
+// FileInfo creates a static fs.FileInfo with the given properties.
+// The result is also a fs.DirEntry and can be safely cast.
+func FileInfo(name string, size int64, mode fs.FileMode, modTime time.Time, contentType string) fs.FileInfo {
+ return &staticFileInfo{
+ name: name,
+ size: size,
+ mode: mode,
+ modTime: modTime,
+ contentType: contentType,
+ }
+}
+
+// DirInfo creates a fs.FileInfo for a directory with the given name. Use
+// FileInfo to set other values.
+func DirInfo(name string, modTime time.Time) fs.FileInfo {
+ return FileInfo(name, 0, fs.ModeDir, modTime, "")
+}
+
+type staticFileInfo struct {
+ modTime time.Time
+ name string
+ contentType string
+ size int64
+ mode fs.FileMode
+}
+
+var (
+ _ fs.FileInfo = (*staticFileInfo)(nil)
+ _ fs.DirEntry = (*staticFileInfo)(nil)
+)
+
+func (fi staticFileInfo) ContentType() string { return fi.contentType }
+func (fi staticFileInfo) IsDir() bool { return fi.Mode().IsDir() }
+func (fi staticFileInfo) Mode() fs.FileMode { return fi.mode }
+func (fi *staticFileInfo) ModTime() time.Time { return fi.modTime }
+func (fi staticFileInfo) Name() string { return fi.name }
+func (fi staticFileInfo) Size() int64 { return fi.size }
+func (fi staticFileInfo) Sys() interface{} { return nil }
+func (fi *staticFileInfo) Info() (fs.FileInfo, error) { return fi, nil }
+func (fi staticFileInfo) Type() fs.FileMode { return fi.Mode().Type() }
+
+// FileInfoDirEntry adapts a fs.FileInfo into a fs.DirEntry. If it doesn't
+// already implement fs.DirEntry, it will be wrapped to always return the
+// same fs.FileInfo.
+func FileInfoDirEntry(fi fs.FileInfo) fs.DirEntry {
+ de, ok := fi.(fs.DirEntry)
+ if ok {
+ return de
+ }
+
+ return &fileinfoDirEntry{fi}
+}
+
+// a wrapper to make a fs.FileInfo into an fs.DirEntry
+type fileinfoDirEntry struct {
+ fs.FileInfo
+}
+
+var _ fs.DirEntry = (*fileinfoDirEntry)(nil)
+
+func (fi *fileinfoDirEntry) Info() (fs.FileInfo, error) { return fi, nil }
+func (fi *fileinfoDirEntry) Type() fs.FileMode { return fi.Mode().Type() }
diff --git a/internal/datafs/envfs_test.go b/internal/datafs/envfs_test.go
new file mode 100644
index 00000000..d8bfd092
--- /dev/null
+++ b/internal/datafs/envfs_test.go
@@ -0,0 +1,140 @@
+package datafs
+
+import (
+ "io/fs"
+ "net/url"
+ "os"
+ "testing"
+ "testing/fstest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEnvFS_Open(t *testing.T) {
+ fsys, err := NewEnvFS(nil)
+ assert.NoError(t, err)
+ assert.IsType(t, &envFS{}, fsys)
+
+ f, err := fsys.Open("foo")
+ assert.NoError(t, err)
+ assert.IsType(t, &envFile{}, f)
+}
+
+func TestEnvFile_Read(t *testing.T) {
+ content := `hello world`
+ t.Setenv("HELLO_WORLD", "hello world")
+
+ f := &envFile{name: "HELLO_WORLD"}
+ b := make([]byte, len(content))
+ n, err := f.Read(b)
+ assert.NoError(t, err)
+ assert.Equal(t, len(content), n)
+ assert.Equal(t, content, string(b))
+
+ fsys := fstest.MapFS{}
+ fsys["foo/bar/baz.txt"] = &fstest.MapFile{Data: []byte("\nhello world\n")}
+
+ t.Setenv("FOO_FILE", "/foo/bar/baz.txt")
+
+ f = &envFile{name: "FOO", locfs: fsys}
+
+ b = make([]byte, len(content))
+ t.Logf("b len is %d", len(b))
+ n, err = f.Read(b)
+ t.Logf("b len is %d", len(b))
+ assert.NoError(t, err)
+ assert.Equal(t, len(content), n)
+ assert.Equal(t, content, string(b))
+}
+
+func TestEnvFile_Stat(t *testing.T) {
+ content := []byte(`hello world`)
+ t.Setenv("HELLO_WORLD", "hello world")
+
+ f := &envFile{name: "HELLO_WORLD"}
+
+ fi, err := f.Stat()
+ assert.NoError(t, err)
+ assert.Equal(t, int64(len(content)), fi.Size())
+
+ fsys := fstest.MapFS{}
+ fsys["foo/bar/baz.txt"] = &fstest.MapFile{Data: []byte("\nhello world\n")}
+
+ t.Setenv("FOO_FILE", "/foo/bar/baz.txt")
+
+ f = &envFile{name: "FOO", locfs: fsys}
+
+ fi, err = f.Stat()
+ assert.NoError(t, err)
+ assert.Equal(t, int64(len(content)), fi.Size())
+}
+
+func TestEnvFS(t *testing.T) {
+ t.Cleanup(func() { environ = os.Environ })
+
+ u, _ := url.Parse("env:")
+
+ lfsys := fstest.MapFS{}
+ lfsys["foo/bar/baz.txt"] = &fstest.MapFile{Data: []byte("\nhello file\n")}
+
+ fsys, err := NewEnvFS(u)
+ assert.NoError(t, err)
+ assert.IsType(t, &envFS{}, fsys)
+
+ envfs, ok := fsys.(*envFS)
+ assert.True(t, ok)
+ envfs.locfs = lfsys
+
+ t.Setenv("FOO_FILE", "/foo/bar/baz.txt")
+
+ b, err := fs.ReadFile(fsys, "FOO")
+ assert.NoError(t, err)
+ assert.Equal(t, "hello file", string(b))
+
+ t.Setenv("FOO", "hello world")
+
+ b, err = fs.ReadFile(fsys, "FOO")
+ assert.NoError(t, err)
+ assert.Equal(t, "hello world", string(b))
+
+ assert.NoError(t, fstest.TestFS(fsys, "FOO", "FOO_FILE"))
+}
+
+func TestEnvFile_ReadDir(t *testing.T) {
+ t.Cleanup(func() { environ = os.Environ })
+
+ t.Run("name must be .", func(t *testing.T) {
+ f := &envFile{name: "foo"}
+ _, err := f.ReadDir(-1)
+ require.Error(t, err)
+ })
+
+ t.Run("empty env should return empty dir", func(t *testing.T) {
+ f := &envFile{name: "."}
+ environ = func() []string { return []string{} }
+ des, err := f.ReadDir(-1)
+ require.NoError(t, err)
+ assert.Empty(t, des)
+ })
+
+ t.Run("non-empty env should return dir with entries", func(t *testing.T) {
+ f := &envFile{name: "."}
+ environ = func() []string { return []string{"FOO=bar", "BAR=quux"} }
+ des, err := f.ReadDir(-1)
+ require.NoError(t, err)
+ require.Len(t, des, 2)
+ assert.Equal(t, "FOO", des[0].Name())
+ assert.Equal(t, "BAR", des[1].Name())
+ })
+
+ t.Run("deal with odd Windows env vars like '=C:=C:\tmp'", func(t *testing.T) {
+ f := &envFile{name: "."}
+ environ = func() []string { return []string{"FOO=bar", "=C:=C:\\tmp", "BAR=quux"} }
+ des, err := f.ReadDir(-1)
+ require.NoError(t, err)
+ require.Len(t, des, 2)
+ assert.Equal(t, "FOO", des[0].Name())
+ assert.Equal(t, "BAR", des[1].Name())
+ })
+}
diff --git a/internal/datafs/fsurl.go b/internal/datafs/fsurl.go
new file mode 100644
index 00000000..a1cf5cbc
--- /dev/null
+++ b/internal/datafs/fsurl.go
@@ -0,0 +1,42 @@
+package datafs
+
+import (
+ "net/url"
+ "strings"
+)
+
+// SplitFSMuxURL splits a URL into a filesystem URL and a relative file path
+func SplitFSMuxURL(in *url.URL) (*url.URL, string) {
+ u := *in
+
+ // git URLs are special - they have double-slashes that separate a repo
+ // from a path in the repo. A missing double-slash means the path is the
+ // root.
+ switch u.Scheme {
+ case "git", "git+file", "git+http", "git+https", "git+ssh":
+ repo, base, _ := strings.Cut(u.Path, "//")
+ u.Path = repo
+ if base == "" {
+ base = "."
+ }
+
+ return &u, base
+ }
+
+ // trim leading and trailing slashes - they are not part of a valid path
+ // according to [io/fs.ValidPath]
+ base := strings.Trim(u.Path, "/")
+
+ if base == "" && u.Opaque != "" {
+ base = u.Opaque
+ u.Opaque = ""
+ }
+
+ if base == "" {
+ base = "."
+ }
+
+ u.Path = "/"
+
+ return &u, base
+}
diff --git a/internal/datafs/fsurl_test.go b/internal/datafs/fsurl_test.go
new file mode 100644
index 00000000..a44edaff
--- /dev/null
+++ b/internal/datafs/fsurl_test.go
@@ -0,0 +1,106 @@
+package datafs
+
+import (
+ "net/url"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSplitFSMuxURL(t *testing.T) {
+ testdata := []struct {
+ in string
+ url string
+ file string
+ }{
+ {
+ "http://example.com/foo.json",
+ "http://example.com/",
+ "foo.json",
+ },
+ {
+ "http://example.com/foo.json?type=application/array+yaml",
+ "http://example.com/?type=application/array+yaml",
+ "foo.json",
+ },
+ {
+ "vault:///secret/a/b/c",
+ "vault:///",
+ "secret/a/b/c",
+ },
+ {
+ "vault:///secret/a/b/",
+ "vault:///",
+ "secret/a/b",
+ },
+ {
+ "s3://bucket/a/b/",
+ "s3://bucket/",
+ "a/b",
+ },
+ {
+ "vault:///foo/bar",
+ "vault:///",
+ "foo/bar",
+ },
+ {
+ "consul://myhost/foo/bar/baz?q=1",
+ "consul://myhost/?q=1",
+ "foo/bar/baz",
+ },
+ {
+ "git+https://example.com/myrepo//foo.yaml",
+ "git+https://example.com/myrepo",
+ "foo.yaml",
+ },
+ {
+ "git+https://example.com/myrepo//",
+ "git+https://example.com/myrepo",
+ ".",
+ },
+ {
+ // git repos are special - no double-slash means the root
+ "git+https://example.com/myrepo",
+ "git+https://example.com/myrepo",
+ ".",
+ },
+ {
+ "git+ssh://git@github.com/hairyhenderson/go-which.git//a/b/c/d?q=1",
+ "git+ssh://git@github.com/hairyhenderson/go-which.git?q=1",
+ "a/b/c/d",
+ },
+ {
+ "merge:file:///tmp/jsonfile.json",
+ "merge:///",
+ "file:///tmp/jsonfile.json",
+ },
+ {
+ "merge:a|b",
+ "merge:///",
+ "a|b",
+ },
+ {
+ "merge:a|b|c|d|e",
+ "merge:///",
+ "a|b|c|d|e",
+ },
+ {
+ "merge:foo/bar/baz.json|qux",
+ "merge:///",
+ "foo/bar/baz.json|qux",
+ },
+ {
+ "merge:vault:///foo/bar|foo|git+ssh://git@github.com/hairyhenderson/go-which.git//a/b/c/d",
+ "merge:///",
+ "vault:///foo/bar|foo|git+ssh://git@github.com/hairyhenderson/go-which.git//a/b/c/d",
+ },
+ }
+
+ for _, d := range testdata {
+ u, err := url.Parse(d.in)
+ assert.NoError(t, err)
+ url, file := SplitFSMuxURL(u)
+ assert.Equal(t, d.url, url.String())
+ assert.Equal(t, d.file, file)
+ }
+}
diff --git a/internal/datafs/fsys.go b/internal/datafs/fsys.go
index fb88b0da..08f52b88 100644
--- a/internal/datafs/fsys.go
+++ b/internal/datafs/fsys.go
@@ -5,11 +5,11 @@ import (
"fmt"
"io/fs"
"net/url"
- "path"
- "path/filepath"
"strings"
"github.com/hairyhenderson/go-fsimpl"
+ "github.com/hairyhenderson/go-fsimpl/vaultfs/vaultauth"
+ "github.com/hairyhenderson/gomplate/v4/internal/urlhelpers"
)
type fsProviderCtxKey struct{}
@@ -29,50 +29,11 @@ func FSProviderFromContext(ctx context.Context) fsimpl.FSProvider {
return nil
}
-// ParseSourceURL parses a datasource URL value, which may be '-' (for stdin://),
-// or it may be a Windows path (with driver letter and back-slash separators) or
-// UNC, or it may be relative. It also might just be a regular absolute URL...
-// In all cases it returns a correct URL for the value. It may be a relative URL
-// in which case the scheme should be assumed to be 'file'
-func ParseSourceURL(value string) (*url.URL, error) {
- if value == "-" {
- value = "stdin://"
- }
- value = filepath.ToSlash(value)
- // handle absolute Windows paths
- volName := ""
- if volName = filepath.VolumeName(value); volName != "" {
- // handle UNCs
- if len(volName) > 2 {
- value = "file:" + value
- } else {
- value = "file:///" + value
- }
- }
- srcURL, err := url.Parse(value)
- if err != nil {
- return nil, err
- }
-
- if volName != "" && len(srcURL.Path) >= 3 {
- if srcURL.Path[0] == '/' && srcURL.Path[2] == ':' {
- srcURL.Path = srcURL.Path[1:]
- }
- }
-
- // if it's an absolute path with no scheme, assume it's a file
- if srcURL.Scheme == "" && path.IsAbs(srcURL.Path) {
- srcURL.Scheme = "file"
- }
-
- return srcURL, nil
-}
-
// FSysForPath returns an [io/fs.FS] for the given path (which may be an URL),
// rooted at /. A [fsimpl.FSProvider] is required to be present in ctx,
// otherwise an error is returned.
func FSysForPath(ctx context.Context, path string) (fs.FS, error) {
- u, err := ParseSourceURL(path)
+ u, err := urlhelpers.ParseSourceURL(path)
if err != nil {
return nil, err
}
@@ -82,18 +43,47 @@ func FSysForPath(ctx context.Context, path string) (fs.FS, error) {
return nil, fmt.Errorf("no filesystem provider in context")
}
- // default to "/" so we have a rooted filesystem for all schemes, but also
- // support volumes on Windows
origPath := u.Path
- if u.Scheme == "file" || strings.HasSuffix(u.Scheme, "+file") || u.Scheme == "" {
- u.Path, _, err = ResolveLocalPath(origPath)
- if err != nil {
- return nil, fmt.Errorf("resolve local path %q: %w", origPath, err)
+
+ switch u.Scheme {
+ case "git+file", "git+http", "git+https", "git+ssh", "git":
+ // git URLs are special - they have double-slashes that separate a repo from
+ // a path in the repo. A missing double-slash means the path is the root.
+ u.Path, _, _ = strings.Cut(u.Path, "//")
+ }
+
+ switch u.Scheme {
+ case "git+http", "git+https", "git+ssh", "git":
+ // no-op, these are handled
+ case "", "file", "git+file":
+ // default to "/" so we have a rooted filesystem for all schemes, but also
+ // support volumes on Windows
+ root, name, rerr := ResolveLocalPath(nil, u.Path)
+ if rerr != nil {
+ return nil, fmt.Errorf("resolve local path %q: %w", origPath, rerr)
+ }
+
+ // windows absolute paths need a slash between the volume and path
+ if root != "" && root[0] != '/' {
+ u.Path = root + "/" + name
+ } else {
+ u.Path = root + name
}
+
// if this is a drive letter, add a trailing slash
- if u.Path[0] != '/' {
+ if len(u.Path) == 2 && u.Path[0] != '/' && u.Path[1] == ':' {
+ u.Path += "/"
+ } else if u.Path[0] != '/' {
u.Path += "/"
}
+
+ // if this starts with a drive letter, add a leading slash
+ // NOPE - this breaks lots of things
+ // if len(u.Path) > 2 && u.Path[0] != '/' && u.Path[1] == ':' {
+ // u.Path = "/" + u.Path
+ // }
+ default:
+ u.Path = "/"
}
fsys, err := fsp.New(u)
@@ -101,6 +91,16 @@ func FSysForPath(ctx context.Context, path string) (fs.FS, error) {
return nil, fmt.Errorf("filesystem provider for %q unavailable: %w", path, err)
}
+ // inject vault auth methods if needed
+ switch u.Scheme {
+ case "vault", "vault+http", "vault+https":
+ fileFsys, err := fsp.New(&url.URL{Scheme: "file", Path: "/"})
+ if err != nil {
+ return nil, fmt.Errorf("filesystem provider for %q unavailable: %w", path, err)
+ }
+ fsys = vaultauth.WithAuthMethod(compositeVaultAuthMethod(fileFsys), fsys)
+ }
+
return fsys, nil
}
diff --git a/internal/datafs/fsys_test.go b/internal/datafs/fsys_test.go
index 80ff5ca1..14e2a0bd 100644
--- a/internal/datafs/fsys_test.go
+++ b/internal/datafs/fsys_test.go
@@ -1,42 +1,78 @@
package datafs
import (
+ "context"
+ "io/fs"
"net/url"
+ "os"
+ "runtime"
"testing"
+ "github.com/hairyhenderson/go-fsimpl"
+ "github.com/hairyhenderson/go-fsimpl/gitfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-func TestParseSourceURL(t *testing.T) {
- expected := &url.URL{
- Scheme: "http",
- Host: "example.com",
- Path: "/foo.json",
- RawQuery: "bar",
- }
- u, err := ParseSourceURL("http://example.com/foo.json?bar")
- require.NoError(t, err)
- assert.EqualValues(t, expected, u)
-
- expected = &url.URL{Scheme: "", Path: ""}
- u, err = ParseSourceURL("")
- require.NoError(t, err)
- assert.EqualValues(t, expected, u)
-
- expected = &url.URL{Scheme: "stdin"}
- u, err = ParseSourceURL("-")
- require.NoError(t, err)
- assert.EqualValues(t, expected, u)
-
- // behviour change in v4 - return relative if it's relative
- expected = &url.URL{Path: "./foo/bar.json"}
- u, err = ParseSourceURL("./foo/bar.json")
- require.NoError(t, err)
- assert.EqualValues(t, expected, u)
-
- expected = &url.URL{Scheme: "file", Path: "/absolute/bar.json"}
- u, err = ParseSourceURL("/absolute/bar.json")
- require.NoError(t, err)
- assert.EqualValues(t, expected, u)
+func TestFSysForPath(t *testing.T) {
+ vol, _ := workingVolume()
+
+ t.Run("no provider", func(t *testing.T) {
+ ctx := ContextWithFSProvider(context.Background(), nil)
+ _, err := FSysForPath(ctx, "foo")
+ require.Error(t, err)
+
+ _, err = FSysForPath(ctx, "foo://bar")
+ require.Error(t, err)
+ })
+
+ t.Run("file url", func(t *testing.T) {
+ fsp := fsimpl.FSProviderFunc(func(u *url.URL) (fs.FS, error) {
+ assert.Equal(t, "file", u.Scheme)
+
+ if runtime.GOOS == "windows" {
+ assert.Equal(t, vol+"/tmp/foo/", u.Path)
+ return os.DirFS(vol + "/"), nil
+ }
+
+ assert.Equal(t, "/tmp/foo", u.Path)
+ return os.DirFS("/"), nil
+ }, "file")
+
+ ctx := ContextWithFSProvider(context.Background(), fsp)
+ fsys, err := FSysForPath(ctx, "file:///tmp/foo")
+ require.NoError(t, err)
+ require.NotNil(t, fsys)
+ })
+
+ t.Run("git url", func(t *testing.T) {
+ fsp := fsimpl.FSProviderFunc(func(u *url.URL) (fs.FS, error) {
+ assert.Equal(t, "git://github.com/hairyhenderson/gomplate", u.String())
+ return gitfs.New(u)
+ }, "git")
+
+ ctx := ContextWithFSProvider(context.Background(), fsp)
+
+ fsys, err := FSysForPath(ctx, "git://github.com/hairyhenderson/gomplate//README.md")
+ require.NoError(t, err)
+ require.NotNil(t, fsys)
+ })
+
+ t.Run("git+file url", func(t *testing.T) {
+ fsp := fsimpl.FSProviderFunc(func(u *url.URL) (fs.FS, error) {
+ assert.Equal(t, "git+file", u.Scheme)
+ if runtime.GOOS == "windows" {
+ assert.Equal(t, vol+"/tmp/repo/", u.Path)
+ } else {
+ assert.Equal(t, "/tmp/repo", u.Path)
+ }
+
+ return gitfs.New(u)
+ }, "git+file")
+
+ ctx := ContextWithFSProvider(context.Background(), fsp)
+ fsys, err := FSysForPath(ctx, "git+file:///tmp/repo//README.md")
+ require.NoError(t, err)
+ require.NotNil(t, fsys)
+ })
}
diff --git a/internal/datafs/getenv.go b/internal/datafs/getenv.go
new file mode 100644
index 00000000..5f9761d4
--- /dev/null
+++ b/internal/datafs/getenv.go
@@ -0,0 +1,51 @@
+package datafs
+
+import (
+ "io/fs"
+ "os"
+ "strings"
+)
+
+// ExpandEnvFsys - a convenience function intended for internal use only!
+func ExpandEnvFsys(fsys fs.FS, s string) string {
+ return os.Expand(s, func(s string) string {
+ return GetenvFsys(fsys, s)
+ })
+}
+
+// GetenvFsys - a convenience function intended for internal use only!
+func GetenvFsys(fsys fs.FS, key string, def ...string) string {
+ val := getenvFile(fsys, key)
+ if val == "" && len(def) > 0 {
+ return def[0]
+ }
+
+ return val
+}
+
+func getenvFile(fsys fs.FS, key string) string {
+ val := os.Getenv(key)
+ if val != "" {
+ return val
+ }
+
+ p := os.Getenv(key + "_FILE")
+ if p != "" {
+ val, err := readFile(fsys, p)
+ if err != nil {
+ return ""
+ }
+ return strings.TrimSpace(val)
+ }
+
+ return ""
+}
+
+func readFile(fsys fs.FS, p string) (string, error) {
+ b, err := fs.ReadFile(fsys, p)
+ if err != nil {
+ return "", err
+ }
+
+ return string(b), nil
+}
diff --git a/internal/datafs/getenv_test.go b/internal/datafs/getenv_test.go
new file mode 100644
index 00000000..c05a4c4b
--- /dev/null
+++ b/internal/datafs/getenv_test.go
@@ -0,0 +1,97 @@
+package datafs
+
+import (
+ "errors"
+ "io/fs"
+ "testing"
+ "testing/fstest"
+
+ "github.com/hack-pad/hackpadfs"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGetenvFsys(t *testing.T) {
+ fsys := fs.FS(fstest.MapFS{
+ "tmp": &fstest.MapFile{Mode: fs.ModeDir | 0o777},
+ "tmp/foo": &fstest.MapFile{Data: []byte("foo")},
+ "tmp/unreadable": &fstest.MapFile{Data: []byte("foo"), Mode: 0o000},
+ })
+ fsys = WrapWdFS(fsys)
+
+ t.Setenv("FOO_FILE", "/tmp/foo")
+ assert.Equal(t, "foo", GetenvFsys(fsys, "FOO", "bar"))
+
+ t.Setenv("FOO_FILE", "/tmp/missing")
+ assert.Equal(t, "bar", GetenvFsys(fsys, "FOO", "bar"))
+
+ fsys = writeOnly(fsys)
+ t.Setenv("FOO_FILE", "/tmp/unreadable")
+ assert.Equal(t, "bar", GetenvFsys(fsys, "FOO", "bar"))
+}
+
+func TestExpandEnvFsys(t *testing.T) {
+ fsys := fs.FS(fstest.MapFS{
+ "tmp": &fstest.MapFile{Mode: fs.ModeDir | 0o777},
+ "tmp/foo": &fstest.MapFile{Data: []byte("foo")},
+ "tmp/unreadable": &fstest.MapFile{Data: []byte("foo"), Mode: 0o000},
+ })
+ fsys = WrapWdFS(fsys)
+
+ t.Setenv("FOO_FILE", "/tmp/foo")
+ assert.Equal(t, "foo is foo", ExpandEnvFsys(fsys, "foo is $FOO"))
+
+ t.Setenv("FOO_FILE", "/tmp/missing")
+ assert.Equal(t, "empty", ExpandEnvFsys(fsys, "${FOO}empty"))
+
+ fsys = writeOnly(fsys)
+ t.Setenv("FOO_FILE", "/tmp/unreadable")
+ assert.Equal(t, "", ExpandEnvFsys(fsys, "${FOO}"))
+}
+
+// Maybe extract this into a separate package sometime...
+// writeOnly - represents a filesystem that's writeable, but read operations fail
+func writeOnly(fsys fs.FS) fs.FS {
+ return &woFS{fsys}
+}
+
+type woFS struct {
+ fsys fs.FS
+}
+
+func (fsys woFS) Open(name string) (fs.File, error) {
+ f, err := fsys.fsys.Open(name)
+ return writeOnlyFile(f), err
+}
+
+func (fsys woFS) ReadDir(_ string) ([]fs.DirEntry, error) {
+ return nil, ErrWriteOnly
+}
+
+func (fsys woFS) Stat(_ string) (fs.FileInfo, error) {
+ return nil, ErrWriteOnly
+}
+
+func writeOnlyFile(f fs.File) fs.File {
+ if f == nil {
+ return nil
+ }
+
+ return &woFile{f}
+}
+
+type woFile struct {
+ fs.File
+}
+
+// Write -
+func (f woFile) Write(p []byte) (n int, err error) {
+ return hackpadfs.WriteFile(f.File, p)
+}
+
+// Read is disabled and returns ErrWriteOnly
+func (f woFile) Read([]byte) (n int, err error) {
+ return 0, ErrWriteOnly
+}
+
+var ErrWriteOnly = errors.New("filesystem is write-only")
diff --git a/internal/datafs/mergefs.go b/internal/datafs/mergefs.go
new file mode 100644
index 00000000..7c612382
--- /dev/null
+++ b/internal/datafs/mergefs.go
@@ -0,0 +1,280 @@
+package datafs
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "net/http"
+ "net/url"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hairyhenderson/go-fsimpl"
+ "github.com/hairyhenderson/gomplate/v4/coll"
+ "github.com/hairyhenderson/gomplate/v4/internal/config"
+ "github.com/hairyhenderson/gomplate/v4/internal/iohelpers"
+ "github.com/hairyhenderson/gomplate/v4/internal/parsers"
+ "github.com/hairyhenderson/gomplate/v4/internal/urlhelpers"
+)
+
+// NewMergeFS returns a new filesystem that merges the contents of multiple
+// paths. Only a URL like "merge:" or "merge:///" makes sense here - the
+// pipe-separated list of sub-sources to merge must be given to Open.
+//
+// Usually you'll want to use WithDataSourcesFS to provide the map of
+// datasources that can be referenced. Otherwise, only URLs will be supported.
+//
+// An FSProvider will also be needed, which can be provided with a context
+// using ContextWithFSProvider. Provide that context with fsimpl.WithContextFS.
+func NewMergeFS(u *url.URL) (fs.FS, error) {
+ if u.Scheme != "merge" {
+ return nil, fmt.Errorf("unsupported scheme %q", u.Scheme)
+ }
+
+ return &mergeFS{
+ ctx: context.Background(),
+ sources: map[string]config.DataSource{},
+ }, nil
+}
+
+type mergeFS struct {
+ ctx context.Context
+ httpClient *http.Client
+ sources map[string]config.DataSource
+}
+
+//nolint:gochecknoglobals
+var MergeFS = fsimpl.FSProviderFunc(NewMergeFS, "merge")
+
+var (
+ _ fs.FS = (*mergeFS)(nil)
+ _ withContexter = (*mergeFS)(nil)
+ _ withDataSourceser = (*mergeFS)(nil)
+)
+
+func (f *mergeFS) WithContext(ctx context.Context) fs.FS {
+ if ctx == nil {
+ return f
+ }
+
+ fsys := *f
+ fsys.ctx = ctx
+
+ return &fsys
+}
+
+func (f *mergeFS) WithHTTPClient(client *http.Client) fs.FS {
+ if client == nil {
+ return f
+ }
+
+ fsys := *f
+ fsys.httpClient = client
+
+ return &fsys
+}
+
+func (f *mergeFS) WithDataSources(sources map[string]config.DataSource) fs.FS {
+ if sources == nil {
+ return f
+ }
+
+ fsys := *f
+ fsys.sources = sources
+
+ return &fsys
+}
+
+func (f *mergeFS) Open(name string) (fs.File, error) {
+ parts := strings.Split(name, "|")
+ if len(parts) < 2 {
+ return nil, &fs.PathError{
+ Op: "open", Path: name,
+ Err: fmt.Errorf("need at least 2 datasources to merge"),
+ }
+ }
+
+ // now open each of the sub-files
+ subFiles := make([]subFile, len(parts))
+
+ modTime := time.Time{}
+
+ for i, part := range parts {
+ // if this is a datasource, look it up
+ subSource, ok := f.sources[part]
+ if !ok {
+ // maybe it's a relative filename?
+ u, uerr := urlhelpers.ParseSourceURL(part)
+ if uerr != nil {
+ return nil, fmt.Errorf("unknown datasource %q, and couldn't parse URL: %w", part, uerr)
+ }
+ subSource = config.DataSource{URL: u}
+ }
+
+ u := subSource.URL
+
+ fsURL, base := SplitFSMuxURL(u)
+
+ // need to support absolute paths on local filesystem too
+ // TODO: this is a hack, probably fix this?
+ if fsURL.Scheme == "file" && runtime.GOOS != "windows" {
+ base = fsURL.Path + base
+ }
+
+ fsys, err := FSysForPath(f.ctx, fsURL.String())
+ if err != nil {
+ return nil, &fs.PathError{
+ Op: "open", Path: name,
+ Err: fmt.Errorf("lookup for %s: %w", u.String(), err),
+ }
+ }
+
+ // pass in the context and other bits
+ fsys = fsimpl.WithContextFS(f.ctx, fsys)
+ fsys = fsimpl.WithHeaderFS(subSource.Header, fsys)
+
+ fsys = fsimpl.WithHTTPClientFS(f.httpClient, fsys)
+
+ // find the content type
+ fi, err := fs.Stat(fsys, base)
+ if err != nil {
+ return nil, &fs.PathError{
+ Op: "open", Path: name,
+ Err: fmt.Errorf("stat merge part %q: %w", part, err),
+ }
+ }
+
+ if fi.ModTime().After(modTime) {
+ modTime = fi.ModTime()
+ }
+
+ // possible type hint in the type query param. Contrary to spec, we allow
+ // unescaped '+' characters to make it simpler to provide types like
+ // "application/array+json"
+ mimeType := u.Query().Get("type")
+ mimeType = strings.ReplaceAll(mimeType, " ", "+")
+
+ if mimeType == "" {
+ mimeType = fsimpl.ContentType(fi)
+ }
+
+ f, err := fsys.Open(base)
+ if err != nil {
+ return nil, &fs.PathError{
+ Op: "open", Path: name,
+ Err: fmt.Errorf("opening merge part %q: %w", part, err),
+ }
+ }
+
+ subFiles[i] = subFile{f, mimeType}
+ }
+
+ return &mergeFile{
+ name: name,
+ subFiles: subFiles,
+ modTime: modTime,
+ }, nil
+}
+
+type subFile struct {
+ fs.File
+ contentType string
+}
+
+type mergeFile struct {
+ name string
+ merged io.Reader // the file's contents, post-merge - buffered here to enable partial reads
+ fi fs.FileInfo
+ modTime time.Time // the modTime of the most recently modified sub-file
+ subFiles []subFile
+ readMux sync.Mutex
+}
+
+var _ fs.File = (*mergeFile)(nil)
+
+func (f *mergeFile) Close() error {
+ for _, f := range f.subFiles {
+ f.Close()
+ }
+ return nil
+}
+
+func (f *mergeFile) Stat() (fs.FileInfo, error) {
+ if f.merged == nil {
+ p := make([]byte, 0)
+ _, err := f.Read(p)
+ if err != nil && !errors.Is(err, io.EOF) {
+ return nil, fmt.Errorf("read: %w", err)
+ }
+ }
+
+ return f.fi, nil
+}
+
+func (f *mergeFile) Read(p []byte) (int, error) {
+ // read from all and merge, then return the requested amount
+ if f.merged == nil {
+ f.readMux.Lock()
+ defer f.readMux.Unlock()
+ // read from all and merge
+ data := make([]map[string]interface{}, len(f.subFiles))
+ for i, sf := range f.subFiles {
+ b, err := io.ReadAll(sf)
+ if err != nil && !errors.Is(err, io.EOF) {
+ return 0, fmt.Errorf("readAll: %w", err)
+ }
+
+ data[i], err = parseMap(sf.contentType, string(b))
+ if err != nil {
+ return 0, fmt.Errorf("parsing map with content type %s: %w", sf.contentType, err)
+ }
+ }
+
+ md, err := mergeData(data)
+ if err != nil {
+ return 0, fmt.Errorf("mergeData: %w", err)
+ }
+
+ f.merged = bytes.NewReader(md)
+
+ f.fi = FileInfo(f.name, int64(len(md)), 0o400, f.modTime, iohelpers.YAMLMimetype)
+ }
+
+ return f.merged.Read(p)
+}
+
+func mergeData(data []map[string]interface{}) ([]byte, error) {
+ dst := data[0]
+ data = data[1:]
+
+ dst, err := coll.Merge(dst, data...)
+ if err != nil {
+ return nil, err
+ }
+
+ s, err := parsers.ToYAML(dst)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(s), nil
+}
+
+func parseMap(mimeType, data string) (map[string]interface{}, error) {
+ datum, err := parsers.ParseData(mimeType, data)
+ if err != nil {
+ return nil, fmt.Errorf("parseData: %w", err)
+ }
+ var m map[string]interface{}
+ switch datum := datum.(type) {
+ case map[string]interface{}:
+ m = datum
+ default:
+ return nil, fmt.Errorf("unexpected data type '%T' for datasource (type %s); merge: can only merge maps", datum, mimeType)
+ }
+ return m, nil
+}
diff --git a/internal/datafs/mergefs_test.go b/internal/datafs/mergefs_test.go
new file mode 100644
index 00000000..1e2c777b
--- /dev/null
+++ b/internal/datafs/mergefs_test.go
@@ -0,0 +1,358 @@
+package datafs
+
+import (
+ "context"
+ "io"
+ "io/fs"
+ "mime"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "testing"
+ "testing/fstest"
+
+ "github.com/hairyhenderson/go-fsimpl"
+ "github.com/hairyhenderson/gomplate/v4/internal/config"
+ "github.com/hairyhenderson/gomplate/v4/internal/iohelpers"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func mustParseURL(in string) *url.URL {
+ u, _ := url.Parse(in)
+ return u
+}
+
+func setupMergeFsys(ctx context.Context, t *testing.T) fs.FS {
+ t.Helper()
+
+ jsonContent := `{"hello": "world"}`
+ yamlContent := "hello: earth\ngoodnight: moon\n"
+ arrayContent := `["hello", "world"]`
+
+ wd, _ := os.Getwd()
+
+ // MapFS doesn't support windows path separators, so we use / exclusively
+ // in this test
+ vol := filepath.VolumeName(wd)
+ if vol != "" && wd != vol {
+ wd = wd[len(vol)+1:]
+ } else if wd[0] == '/' {
+ wd = wd[1:]
+ }
+ wd = filepath.ToSlash(wd)
+
+ t.Logf("wd: %s", wd)
+
+ fsys := WrapWdFS(fstest.MapFS{
+ "tmp": {Mode: fs.ModeDir | 0o777},
+ "tmp/jsonfile.json": {Data: []byte(jsonContent)},
+ "tmp/array.json": {Data: []byte(arrayContent)},
+ "tmp/yamlfile.yaml": {Data: []byte(yamlContent)},
+ "tmp/textfile.txt": {Data: []byte(`plain text...`)},
+ path.Join(wd, "jsonfile.json"): {Data: []byte(jsonContent)},
+ path.Join(wd, "array.json"): {Data: []byte(arrayContent)},
+ path.Join(wd, "yamlfile.yaml"): {Data: []byte(yamlContent)},
+ path.Join(wd, "textfile.txt"): {Data: []byte(`plain text...`)},
+ path.Join(wd, "tmp/jsonfile.json"): {Data: []byte(jsonContent)},
+ path.Join(wd, "tmp/array.json"): {Data: []byte(arrayContent)},
+ path.Join(wd, "tmp/yamlfile.yaml"): {Data: []byte(yamlContent)},
+ path.Join(wd, "tmp/textfile.txt"): {Data: []byte(`plain text...`)},
+ })
+
+ source := config.DataSource{
+ URL: mustParseURL("merge:file:///tmp/jsonfile.json|file:///tmp/yamlfile.yaml"),
+ }
+ sources := map[string]config.DataSource{
+ "foo": source,
+ "bar": {URL: mustParseURL("file:///tmp/jsonfile.json")},
+ "baz": {URL: mustParseURL("file:///tmp/yamlfile.yaml")},
+ "text": {URL: mustParseURL("file:///tmp/textfile.txt")},
+ "badscheme": {URL: mustParseURL("bad:///scheme.json")},
+ // mime type overridden by URL query, should fail to parse
+ "badtype": {URL: mustParseURL("file:///tmp/jsonfile.json?type=foo/bar")},
+ "array": {
+ URL: mustParseURL("file:///tmp/array.json?type=" + url.QueryEscape(iohelpers.JSONArrayMimetype)),
+ },
+ }
+
+ mux := fsimpl.NewMux()
+ mux.Add(MergeFS)
+ mux.Add(WrappedFSProvider(fsys, "file", ""))
+
+ ctx = ContextWithFSProvider(ctx, mux)
+
+ fsys, err := NewMergeFS(mustParseURL("merge:///"))
+ require.NoError(t, err)
+
+ fsys = WithDataSourcesFS(sources, fsys)
+ fsys = fsimpl.WithContextFS(ctx, fsys)
+
+ return fsys
+}
+
+// func TestReadMerge(t *testing.T) {
+// ctx := context.Background()
+
+// jsonContent := `{"hello": "world"}`
+// yamlContent := "hello: earth\ngoodnight: moon\n"
+// arrayContent := `["hello", "world"]`
+
+// mergedContent := "goodnight: moon\nhello: world\n"
+
+// fsys := fstest.MapFS{}
+// fsys["tmp"] = &fstest.MapFile{Mode: fs.ModeDir | 0777}
+// fsys["tmp/jsonfile.json"] = &fstest.MapFile{Data: []byte(jsonContent)}
+// fsys["tmp/array.json"] = &fstest.MapFile{Data: []byte(arrayContent)}
+// fsys["tmp/yamlfile.yaml"] = &fstest.MapFile{Data: []byte(yamlContent)}
+// fsys["tmp/textfile.txt"] = &fstest.MapFile{Data: []byte(`plain text...`)}
+
+// // working dir with volume name trimmed
+// wd, _ := os.Getwd()
+// vol := filepath.VolumeName(wd)
+// wd = wd[len(vol)+1:]
+
+// fsys[path.Join(wd, "jsonfile.json")] = &fstest.MapFile{Data: []byte(jsonContent)}
+// fsys[path.Join(wd, "array.json")] = &fstest.MapFile{Data: []byte(arrayContent)}
+// fsys[path.Join(wd, "yamlfile.yaml")] = &fstest.MapFile{Data: []byte(yamlContent)}
+// fsys[path.Join(wd, "textfile.txt")] = &fstest.MapFile{Data: []byte(`plain text...`)}
+
+// fsmux := fsimpl.NewMux()
+// fsmux.Add(fsimpl.WrappedFSProvider(&fsys, "file"))
+// ctx = datafs.ContextWithFSProvider(ctx, fsmux)
+
+// source := &Source{Alias: "foo", URL: mustParseURL("merge:file:///tmp/jsonfile.json|file:///tmp/yamlfile.yaml")}
+// d := &Data{
+// Sources: map[string]*Source{
+// "foo": source,
+// "bar": {Alias: "bar", URL: mustParseURL("file:///tmp/jsonfile.json")},
+// "baz": {Alias: "baz", URL: mustParseURL("file:///tmp/yamlfile.yaml")},
+// "text": {Alias: "text", URL: mustParseURL("file:///tmp/textfile.txt")},
+// "badscheme": {Alias: "badscheme", URL: mustParseURL("bad:///scheme.json")},
+// "badtype": {Alias: "badtype", URL: mustParseURL("file:///tmp/textfile.txt?type=foo/bar")},
+// "array": {Alias: "array", URL: mustParseURL("file:///tmp/array.json?type=" + url.QueryEscape(jsonArrayMimetype))},
+// },
+// Ctx: ctx,
+// }
+
+// actual, err := d.readMerge(ctx, source)
+// assert.NoError(t, err)
+// assert.Equal(t, mergedContent, string(actual))
+
+// source.URL = mustParseURL("merge:bar|baz")
+// actual, err = d.readMerge(ctx, source)
+// assert.NoError(t, err)
+// assert.Equal(t, mergedContent, string(actual))
+
+// source.URL = mustParseURL("merge:./jsonfile.json|baz")
+// actual, err = d.readMerge(ctx, source)
+// assert.NoError(t, err)
+// assert.Equal(t, mergedContent, string(actual))
+
+// source.URL = mustParseURL("merge:file:///tmp/jsonfile.json")
+// _, err = d.readMerge(ctx, source)
+// assert.Error(t, err)
+
+// source.URL = mustParseURL("merge:bogusalias|file:///tmp/jsonfile.json")
+// _, err = d.readMerge(ctx, source)
+// assert.Error(t, err)
+
+// source.URL = mustParseURL("merge:file:///tmp/jsonfile.json|badscheme")
+// _, err = d.readMerge(ctx, source)
+// assert.Error(t, err)
+
+// source.URL = mustParseURL("merge:file:///tmp/jsonfile.json|badtype")
+// _, err = d.readMerge(ctx, source)
+// assert.Error(t, err)
+
+// source.URL = mustParseURL("merge:file:///tmp/jsonfile.json|array")
+// _, err = d.readMerge(ctx, source)
+// assert.Error(t, err)
+// }
+
+func TestMergeData(t *testing.T) {
+ def := map[string]interface{}{
+ "f": true,
+ "t": false,
+ "z": "def",
+ }
+ out, err := mergeData([]map[string]interface{}{def})
+ assert.NoError(t, err)
+ assert.Equal(t, "f: true\nt: false\nz: def\n", string(out))
+
+ over := map[string]interface{}{
+ "f": false,
+ "t": true,
+ "z": "over",
+ }
+ out, err = mergeData([]map[string]interface{}{over, def})
+ assert.NoError(t, err)
+ assert.Equal(t, "f: false\nt: true\nz: over\n", string(out))
+
+ over = map[string]interface{}{
+ "f": false,
+ "t": true,
+ "z": "over",
+ "m": map[string]interface{}{
+ "a": "aaa",
+ },
+ }
+ out, err = mergeData([]map[string]interface{}{over, def})
+ assert.NoError(t, err)
+ assert.Equal(t, "f: false\nm:\n a: aaa\nt: true\nz: over\n", string(out))
+
+ uber := map[string]interface{}{
+ "z": "über",
+ }
+ out, err = mergeData([]map[string]interface{}{uber, over, def})
+ assert.NoError(t, err)
+ assert.Equal(t, "f: false\nm:\n a: aaa\nt: true\nz: über\n", string(out))
+
+ uber = map[string]interface{}{
+ "m": "notamap",
+ "z": map[string]interface{}{
+ "b": "bbb",
+ },
+ }
+ out, err = mergeData([]map[string]interface{}{uber, over, def})
+ assert.NoError(t, err)
+ assert.Equal(t, "f: false\nm: notamap\nt: true\nz:\n b: bbb\n", string(out))
+
+ uber = map[string]interface{}{
+ "m": map[string]interface{}{
+ "b": "bbb",
+ },
+ }
+ out, err = mergeData([]map[string]interface{}{uber, over, def})
+ assert.NoError(t, err)
+ assert.Equal(t, "f: false\nm:\n a: aaa\n b: bbb\nt: true\nz: over\n", string(out))
+}
+
+func TestMergeFS_Open(t *testing.T) {
+ // u, _ := url.Parse("merge:")
+ fsys := setupMergeFsys(context.Background(), t)
+ assert.IsType(t, &mergeFS{}, fsys)
+
+ _, err := fsys.Open("/")
+ assert.Error(t, err)
+
+ _, err = fsys.Open("just/one/part")
+ assert.Error(t, err)
+ assert.ErrorContains(t, err, "need at least 2 datasources to merge")
+
+ // missing aliases, fallback to relative files, but there's no FS registered
+ // for the empty scheme
+ _, err = fsys.Open("a|b")
+ assert.ErrorIs(t, err, fs.ErrNotExist)
+
+ // missing alias
+ _, err = fsys.Open("bogusalias|file:///tmp/jsonfile.json")
+ assert.ErrorIs(t, err, fs.ErrNotExist)
+
+ // unregistered scheme
+ _, err = fsys.Open("file:///tmp/jsonfile.json|badscheme")
+ assert.ErrorContains(t, err, "no filesystem registered for scheme \"bad\"")
+}
+
+func TestMergeFile_Read(t *testing.T) {
+ fsys := fstest.MapFS{
+ "one.yml": {Data: []byte("one: 1\n")},
+ "two.json": {Data: []byte(`{"one": false, "two": 2}`)},
+ "three.toml": {Data: []byte("one = 999\nthree = 3\n")},
+ }
+
+ files := make([]subFile, 3)
+ for i, fn := range []string{"one.yml", "two.json", "three.toml"} {
+ f, _ := fsys.Open(fn)
+ defer f.Close()
+
+ ct := mime.TypeByExtension(filepath.Ext(fn))
+
+ files[i] = subFile{f, ct}
+ }
+
+ mf := &mergeFile{name: "one.yml|two.json|three.toml", subFiles: files}
+
+ b, err := io.ReadAll(mf)
+ require.NoError(t, err)
+ assert.Equal(t, "one: 1\nthree: 3\ntwo: 2\n", string(b))
+
+ // now try with partial reads
+ for i, fn := range []string{"one.yml", "two.json", "three.toml"} {
+ f, _ := fsys.Open(fn)
+ defer f.Close()
+
+ ct := mime.TypeByExtension(filepath.Ext(fn))
+
+ files[i] = subFile{f, ct}
+ }
+
+ mf = &mergeFile{name: "one.yml|two.json|three.toml", subFiles: files}
+
+ p := make([]byte, 10)
+ n, err := mf.Read(p)
+ require.NoError(t, err)
+ assert.Equal(t, 10, n)
+ assert.Equal(t, "one: 1\nthr", string(p))
+
+ n, err = mf.Read(p)
+ require.NoError(t, err)
+ assert.Equal(t, 10, n)
+ assert.Equal(t, "ee: 3\ntwo:", string(p))
+
+ n, err = mf.Read(p)
+ require.NoError(t, err)
+ assert.Equal(t, 3, n)
+ assert.Equal(t, " 2\n 3\ntwo:", string(p))
+}
+
+func TestMergeFS_ReadFile(t *testing.T) {
+ mergedContent := "goodnight: moon\nhello: world\n"
+
+ fsys := setupMergeFsys(context.Background(), t)
+
+ testdata := []string{
+ // absolute URLs
+ "file:///tmp/jsonfile.json|file:///tmp/yamlfile.yaml",
+ // aliases
+ "bar|baz",
+ // mixed relative file and alias
+ "jsonfile.json|baz",
+ // relative file with ./ and alias
+ "./jsonfile.json|baz",
+ }
+
+ for _, td := range testdata {
+ t.Run(td, func(t *testing.T) {
+ f, err := fsys.Open(td)
+ require.NoError(t, err)
+ defer f.Close()
+
+ b, err := io.ReadAll(f)
+ require.NoError(t, err)
+ assert.Equal(t, mergedContent, string(b))
+ })
+ }
+
+ // read errors
+ errortests := []struct {
+ in string
+ expectedError string
+ }{
+ {"file:///tmp/jsonfile.json|badtype", "data of type \"foo/bar\" not yet supported"},
+ {"file:///tmp/jsonfile.json|array", "can only merge maps"},
+ }
+
+ for _, td := range errortests {
+ t.Run(td.in, func(t *testing.T) {
+ f, err := fsys.Open(td.in)
+ require.NoError(t, err)
+ defer f.Close()
+
+ _, err = io.ReadAll(f)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), td.expectedError)
+ })
+ }
+}
diff --git a/internal/datafs/stdinfs.go b/internal/datafs/stdinfs.go
new file mode 100644
index 00000000..46cb030a
--- /dev/null
+++ b/internal/datafs/stdinfs.go
@@ -0,0 +1,110 @@
+package datafs
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "io/fs"
+ "net/url"
+ "time"
+
+ "github.com/hairyhenderson/go-fsimpl"
+)
+
+// NewStdinFS returns a filesystem (an fs.FS) that can be used to read data from
+// standard input (os.Stdin).
+func NewStdinFS(_ *url.URL) (fs.FS, error) {
+ return &stdinFS{ctx: context.Background()}, nil
+}
+
+type stdinFS struct {
+ ctx context.Context
+}
+
+//nolint:gochecknoglobals
+var StdinFS = fsimpl.FSProviderFunc(NewStdinFS, "stdin")
+
+var (
+ _ fs.FS = (*stdinFS)(nil)
+ _ fs.ReadFileFS = (*stdinFS)(nil)
+ _ withContexter = (*stdinFS)(nil)
+)
+
+func (f stdinFS) WithContext(ctx context.Context) fs.FS {
+ fsys := f
+ fsys.ctx = ctx
+
+ return &fsys
+}
+
+func (f *stdinFS) Open(name string) (fs.File, error) {
+ if !fs.ValidPath(name) {
+ return nil, &fs.PathError{
+ Op: "open",
+ Path: name,
+ Err: fs.ErrInvalid,
+ }
+ }
+
+ stdin := StdinFromContext(f.ctx)
+
+ return &stdinFile{name: name, body: stdin}, nil
+}
+
+func (f *stdinFS) ReadFile(name string) ([]byte, error) {
+ if !fs.ValidPath(name) {
+ return nil, &fs.PathError{
+ Op: "readFile",
+ Path: name,
+ Err: fs.ErrInvalid,
+ }
+ }
+
+ stdin := StdinFromContext(f.ctx)
+
+ return io.ReadAll(stdin)
+}
+
+type stdinFile struct {
+ body io.Reader
+ name string
+}
+
+var _ fs.File = (*stdinFile)(nil)
+
+func (f *stdinFile) Close() error {
+ if f.body == nil {
+ return &fs.PathError{Op: "close", Path: f.name, Err: fs.ErrClosed}
+ }
+
+ f.body = nil
+ return nil
+}
+
+func (f *stdinFile) stdinReader() (int, error) {
+ b, err := io.ReadAll(f.body)
+ if err != nil {
+ return 0, err
+ }
+
+ f.body = bytes.NewReader(b)
+
+ return len(b), err
+}
+
+func (f *stdinFile) Stat() (fs.FileInfo, error) {
+ n, err := f.stdinReader()
+ if err != nil {
+ return nil, err
+ }
+
+ return FileInfo(f.name, int64(n), 0o444, time.Time{}, ""), nil
+}
+
+func (f *stdinFile) Read(p []byte) (int, error) {
+ if f.body == nil {
+ return 0, io.EOF
+ }
+
+ return f.body.Read(p)
+}
diff --git a/internal/datafs/stdinfs_test.go b/internal/datafs/stdinfs_test.go
new file mode 100644
index 00000000..2de477cc
--- /dev/null
+++ b/internal/datafs/stdinfs_test.go
@@ -0,0 +1,109 @@
+package datafs
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "io/fs"
+ "net/url"
+ "testing"
+
+ "github.com/hairyhenderson/go-fsimpl"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestStdinFS_Open(t *testing.T) {
+ fsys, err := NewStdinFS(nil)
+ assert.NoError(t, err)
+ assert.IsType(t, &stdinFS{}, fsys)
+
+ f, err := fsys.Open("foo")
+ assert.NoError(t, err)
+ assert.IsType(t, &stdinFile{}, f)
+}
+
+func TestStdinFile_Read(t *testing.T) {
+ content := `hello world`
+
+ f := &stdinFile{name: "foo", body: bytes.NewBufferString(content)}
+ b := make([]byte, len(content))
+ n, err := f.Read(b)
+ assert.NoError(t, err)
+ assert.Equal(t, len(content), n)
+ assert.Equal(t, content, string(b))
+}
+
+func TestStdinFile_Stat(t *testing.T) {
+ content := []byte(`hello world`)
+
+ f := &stdinFile{name: "hello", body: bytes.NewReader(content)}
+
+ fi, err := f.Stat()
+ assert.NoError(t, err)
+ assert.Equal(t, int64(len(content)), fi.Size())
+
+ f = &stdinFile{name: "hello", body: &errorReader{err: fs.ErrPermission}}
+
+ _, err = f.Stat()
+ assert.ErrorIs(t, err, fs.ErrPermission)
+}
+
+func TestStdinFS(t *testing.T) {
+ u, _ := url.Parse("stdin:")
+
+ content := []byte("\nhello file\n")
+
+ ctx := ContextWithStdin(context.Background(), bytes.NewReader(content))
+
+ fsys, err := NewStdinFS(u)
+ assert.NoError(t, err)
+ assert.IsType(t, &stdinFS{}, fsys)
+
+ _, ok := fsys.(*stdinFS)
+ assert.True(t, ok)
+
+ fsys = fsimpl.WithContextFS(ctx, fsys)
+
+ b, err := fs.ReadFile(fsys, "foo")
+ assert.NoError(t, err)
+ assert.Equal(t, "\nhello file\n", string(b))
+
+ ctx = ContextWithStdin(context.Background(), bytes.NewReader(content))
+ fsys = fsimpl.WithContextFS(ctx, fsys)
+
+ _, err = fsys.Open("..")
+ assert.ErrorIs(t, err, fs.ErrInvalid)
+
+ _, err = fs.ReadFile(fsys, "/foo")
+ assert.ErrorIs(t, err, fs.ErrInvalid)
+
+ f, err := fsys.Open("doesn't matter what it's named.txt")
+ assert.NoError(t, err)
+
+ fi, err := f.Stat()
+ assert.NoError(t, err)
+ assert.Equal(t, int64(len(content)), fi.Size())
+
+ b, err = io.ReadAll(f)
+ assert.NoError(t, err)
+ assert.Equal(t, content, b)
+
+ err = f.Close()
+ assert.NoError(t, err)
+
+ err = f.Close()
+ assert.ErrorIs(t, err, fs.ErrClosed)
+
+ p := make([]byte, 5)
+ _, err = f.Read(p)
+ assert.Error(t, err)
+ assert.ErrorIs(t, err, io.EOF)
+}
+
+type errorReader struct {
+ err error
+}
+
+func (r *errorReader) Read(_ []byte) (int, error) {
+ return 0, r.err
+}
diff --git a/internal/datafs/vaultauth.go b/internal/datafs/vaultauth.go
new file mode 100644
index 00000000..3ad733a0
--- /dev/null
+++ b/internal/datafs/vaultauth.go
@@ -0,0 +1,89 @@
+package datafs
+
+import (
+ "context"
+ "fmt"
+ "io/fs"
+ "os"
+
+ "github.com/hairyhenderson/go-fsimpl/vaultfs/vaultauth"
+ "github.com/hairyhenderson/gomplate/v4/internal/deprecated"
+ "github.com/hairyhenderson/gomplate/v4/internal/iohelpers"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/api/auth/aws"
+)
+
+// compositeVaultAuthMethod configures the auth method based on environment
+// variables. It extends [vaultfs.EnvAuthMethod] by falling back to AWS EC2
+// authentication if the other methods fail.
+func compositeVaultAuthMethod(envFsys fs.FS) api.AuthMethod {
+ return vaultauth.CompositeAuthMethod(
+ vaultauth.EnvAuthMethod(),
+ envEC2AuthAdapter(envFsys),
+ )
+}
+
+// func CompositeVaultAuthMethod() api.AuthMethod {
+// return compositeVaultAuthMethod(WrapWdFS(osfs.NewFS()))
+// }
+
+// envEC2AuthAdapter builds an AWS EC2 authentication method from environment
+// variables, for use only with [compositeVaultAuthMethod]
+func envEC2AuthAdapter(envFS fs.FS) api.AuthMethod {
+ mountPath := GetenvFsys(envFS, "VAULT_AUTH_AWS_MOUNT", "aws")
+
+ nonce := GetenvFsys(envFS, "VAULT_AUTH_AWS_NONCE")
+ role := GetenvFsys(envFS, "VAULT_AUTH_AWS_ROLE")
+
+ // temporary workaround while we wait to deprecate AWS_META_ENDPOINT
+ if endpoint := os.Getenv("AWS_META_ENDPOINT"); endpoint != "" {
+ deprecated.WarnDeprecated(context.Background(), "Use AWS_EC2_METADATA_SERVICE_ENDPOINT instead of AWS_META_ENDPOINT")
+ if os.Getenv("AWS_EC2_METADATA_SERVICE_ENDPOINT") == "" {
+ os.Setenv("AWS_EC2_METADATA_SERVICE_ENDPOINT", endpoint)
+ }
+ }
+
+ awsauth, err := aws.NewAWSAuth(
+ aws.WithEC2Auth(),
+ aws.WithMountPath(mountPath),
+ aws.WithNonce(nonce),
+ aws.WithRole(role),
+ )
+ if err != nil {
+ return nil
+ }
+
+ output := GetenvFsys(envFS, "VAULT_AUTH_AWS_NONCE_OUTPUT")
+ if output == "" {
+ return awsauth
+ }
+
+ return &ec2AuthNonceWriter{AWSAuth: awsauth, nonce: nonce, output: output}
+}
+
+// ec2AuthNonceWriter - wraps an AWSAuth, and writes the nonce to the nonce
+// output file
+type ec2AuthNonceWriter struct {
+ *aws.AWSAuth
+ nonce string
+ output string
+}
+
+func (a *ec2AuthNonceWriter) Login(ctx context.Context, client *api.Client) (*api.Secret, error) {
+ secret, err := a.AWSAuth.Login(ctx, client)
+ if err != nil {
+ return nil, err
+ }
+
+ nonce := a.nonce
+ if val, ok := secret.Auth.Metadata["nonce"]; ok {
+ nonce = val
+ }
+
+ err = os.WriteFile(a.output, []byte(nonce+"\n"), iohelpers.NormalizeFileMode(0o600))
+ if err != nil {
+ return nil, fmt.Errorf("error writing nonce output file: %w", err)
+ }
+
+ return secret, nil
+}
diff --git a/internal/datafs/wdfs.go b/internal/datafs/wdfs.go
index e696e7d1..e10fb0f5 100644
--- a/internal/datafs/wdfs.go
+++ b/internal/datafs/wdfs.go
@@ -13,23 +13,38 @@ import (
"github.com/hairyhenderson/go-fsimpl"
)
-// ResolveLocalPath resolves a path on the local filesystem, relative to the
+// ResolveLocalPath resolves a path on the given filesystem, relative to the
// current working directory, and returns both the root (/ or a volume name on
// Windows) and the resolved path. If the path is absolute (e.g. starts with a `/` or
// volume name on Windows), it is split and returned as-is.
+// If fsys is nil, the current working directory is used.
// The output is suitable for use with [io/fs] functions.
-//
-// TODO: maybe take fsys as an argument, and if it's a wdFS, use its vol instead
-// of calling os.Getwd?
-func ResolveLocalPath(name string) (root, resolved string, err error) {
+func ResolveLocalPath(fsys fs.FS, name string) (root, resolved string, err error) {
// ignore empty names
if len(name) == 0 {
return "", "", nil
}
+ switch fsys := fsys.(type) {
+ case *wdFS:
+ return resolveLocalPath(fsys.vol, name)
+ default:
+ }
+
+ vol, err := workingVolume()
+ if err != nil {
+ return "", "", err
+ }
+
+ return resolveLocalPath(vol, name)
+}
+
+// workingVolume - returns the current working directory's volume name, or "/" if
+// the current working directory has no volume name (e.g. on Unix).
+func workingVolume() (string, error) {
wd, err := os.Getwd()
if err != nil {
- return "", "", fmt.Errorf("getwd: %w", err)
+ return "", fmt.Errorf("getwd: %w", err)
}
vol := filepath.VolumeName(wd)
@@ -37,11 +52,10 @@ func ResolveLocalPath(name string) (root, resolved string, err error) {
vol = "/"
}
- f := &wdFS{vol: vol}
- return f.resolveLocalPath(name)
+ return vol, nil
}
-func (w *wdFS) resolveLocalPath(name string) (root, resolved string, err error) {
+func resolveLocalPath(wvol, name string) (root, resolved string, err error) {
// ignore empty names
if len(name) == 0 {
return "", "", nil
@@ -53,15 +67,11 @@ func (w *wdFS) resolveLocalPath(name string) (root, resolved string, err error)
// special-case for (Windows) paths that start with '/' but have no volume
// name (e.g. "/foo/bar"). UNC paths (beginning with "//") are ignored.
if name[0] == '/' && (len(name) == 1 || (name[1] != '/' && name[1] != '?')) {
- name = filepath.Join(w.vol, name)
+ name = filepath.Join(wvol, name)
// TODO: maybe this can be reduced to just '!filepath.IsAbs(name)'?
} else if name[0] != '/' && !filepath.IsAbs(name) {
- abs := ""
- abs, err = filepath.Abs(name)
- if err != nil {
- return "", "", fmt.Errorf("abs %q: %w", name, err)
- }
- name = abs
+ wd, _ := os.Getwd()
+ name = filepath.Join(wd, name)
}
name, err = normalizeWindowsPath(name)
@@ -200,7 +210,7 @@ var WdFS = fsimpl.FSProviderFunc(
return nil, fmt.Errorf("unsupported path %q: %w", u.Path, fs.ErrInvalid)
}
- vol, _, err := ResolveLocalPath(u.Path)
+ vol, _, err := ResolveLocalPath(nil, u.Path)
if err != nil {
return nil, fmt.Errorf("resolve %q: %w", u.Path, err)
}
@@ -232,31 +242,37 @@ func WrapWdFS(fsys fs.FS) fs.FS {
return fsys
}
- return &wdFS{fsys: fsys}
+ vol, _ := workingVolume()
+
+ return &wdFS{vol: vol, fsys: fsys}
}
// wdFS is a filesystem wrapper that assumes non-absolute paths are relative to
// the current working directory (as reported by [os.Getwd]).
-// It only works in a meaningful way when used with a local filesystem (e.g.
+// It only works in a meaningful way when used with a local filesystem (e.g.
// [os.DirFS] or [hackpadfs/os.FS]).
type wdFS struct {
fsys fs.FS
- vol string
+
+ // volume name used for drive-relative paths on Windows for cases when they
+ // shouldn't be relative to the current working directory's volume
+ // TODO: validate that this is actually needed
+ vol string
}
var (
- _ fs.FS = &wdFS{}
- _ fs.StatFS = &wdFS{}
- _ fs.ReadFileFS = &wdFS{}
- _ fs.ReadDirFS = &wdFS{}
- _ fs.SubFS = &wdFS{}
- _ fs.GlobFS = &wdFS{}
- _ hackpadfs.CreateFS = &wdFS{}
- _ hackpadfs.OpenFileFS = &wdFS{}
- _ hackpadfs.MkdirFS = &wdFS{}
- _ hackpadfs.MkdirAllFS = &wdFS{}
- _ hackpadfs.RemoveFS = &wdFS{}
- _ hackpadfs.ChmodFS = &wdFS{}
+ _ fs.FS = (*wdFS)(nil)
+ _ fs.StatFS = (*wdFS)(nil)
+ _ fs.ReadFileFS = (*wdFS)(nil)
+ _ fs.ReadDirFS = (*wdFS)(nil)
+ _ fs.SubFS = (*wdFS)(nil)
+ _ fs.GlobFS = (*wdFS)(nil)
+ _ hackpadfs.CreateFS = (*wdFS)(nil)
+ _ hackpadfs.OpenFileFS = (*wdFS)(nil)
+ _ hackpadfs.MkdirFS = (*wdFS)(nil)
+ _ hackpadfs.MkdirAllFS = (*wdFS)(nil)
+ _ hackpadfs.RemoveFS = (*wdFS)(nil)
+ _ hackpadfs.ChmodFS = (*wdFS)(nil)
)
func (w *wdFS) fsysFor(vol string) (fs.FS, error) {
@@ -280,7 +296,7 @@ func (w *wdFS) fsysFor(vol string) (fs.FS, error) {
}
func (w *wdFS) Open(name string) (fs.File, error) {
- root, resolved, err := w.resolveLocalPath(name)
+ root, resolved, err := resolveLocalPath(w.vol, name)
if err != nil {
return nil, fmt.Errorf("resolve: %w", err)
}
@@ -292,7 +308,7 @@ func (w *wdFS) Open(name string) (fs.File, error) {
}
func (w *wdFS) Stat(name string) (fs.FileInfo, error) {
- root, resolved, err := w.resolveLocalPath(name)
+ root, resolved, err := resolveLocalPath(w.vol, name)
if err != nil {
return nil, fmt.Errorf("resolve: %w", err)
}
@@ -304,7 +320,7 @@ func (w *wdFS) Stat(name string) (fs.FileInfo, error) {
}
func (w *wdFS) ReadFile(name string) ([]byte, error) {
- root, resolved, err := w.resolveLocalPath(name)
+ root, resolved, err := resolveLocalPath(w.vol, name)
if err != nil {
return nil, fmt.Errorf("resolve: %w", err)
}
@@ -316,7 +332,7 @@ func (w *wdFS) ReadFile(name string) ([]byte, error) {
}
func (w *wdFS) ReadDir(name string) ([]fs.DirEntry, error) {
- root, resolved, err := w.resolveLocalPath(name)
+ root, resolved, err := resolveLocalPath(w.vol, name)
if err != nil {
return nil, fmt.Errorf("resolve: %w", err)
}
@@ -344,7 +360,7 @@ func (w *wdFS) Glob(_ string) ([]string, error) {
}
func (w *wdFS) Create(name string) (fs.File, error) {
- root, resolved, err := w.resolveLocalPath(name)
+ root, resolved, err := resolveLocalPath(w.vol, name)
if err != nil {
return nil, fmt.Errorf("resolve: %w", err)
}
@@ -356,7 +372,7 @@ func (w *wdFS) Create(name string) (fs.File, error) {
}
func (w *wdFS) OpenFile(name string, flag int, perm fs.FileMode) (fs.File, error) {
- root, resolved, err := w.resolveLocalPath(name)
+ root, resolved, err := resolveLocalPath(w.vol, name)
if err != nil {
return nil, fmt.Errorf("resolve: %w", err)
}
@@ -368,7 +384,7 @@ func (w *wdFS) OpenFile(name string, flag int, perm fs.FileMode) (fs.File, error
}
func (w *wdFS) Mkdir(name string, perm fs.FileMode) error {
- root, resolved, err := w.resolveLocalPath(name)
+ root, resolved, err := resolveLocalPath(w.vol, name)
if err != nil {
return fmt.Errorf("resolve: %w", err)
}
@@ -384,7 +400,7 @@ func (w *wdFS) Mkdir(name string, perm fs.FileMode) error {
}
func (w *wdFS) MkdirAll(name string, perm fs.FileMode) error {
- root, resolved, err := w.resolveLocalPath(name)
+ root, resolved, err := resolveLocalPath(w.vol, name)
if err != nil {
return fmt.Errorf("resolve: %w", err)
}
@@ -396,7 +412,7 @@ func (w *wdFS) MkdirAll(name string, perm fs.FileMode) error {
}
func (w *wdFS) Remove(name string) error {
- root, resolved, err := w.resolveLocalPath(name)
+ root, resolved, err := resolveLocalPath(w.vol, name)
if err != nil {
return fmt.Errorf("resolve: %w", err)
}
@@ -408,7 +424,7 @@ func (w *wdFS) Remove(name string) error {
}
func (w *wdFS) Chmod(name string, mode fs.FileMode) error {
- root, resolved, err := w.resolveLocalPath(name)
+ root, resolved, err := resolveLocalPath(w.vol, name)
if err != nil {
return fmt.Errorf("resolve: %w", err)
}
diff --git a/internal/datafs/wdfs_test.go b/internal/datafs/wdfs_test.go
index 768ec386..f14ebb19 100644
--- a/internal/datafs/wdfs_test.go
+++ b/internal/datafs/wdfs_test.go
@@ -140,6 +140,20 @@ func TestWDFS_WriteOps(t *testing.T) {
// and check that it's gone
_, err = fsys.Stat("/tmp/foo")
assert.ErrorIs(t, err, fs.ErrNotExist)
+
+ // make sure we can write to a subfs
+ subfs, err := fs.Sub(fsys, "tmp")
+ require.NoError(t, err)
+ require.NotNil(t, subfs)
+
+ // this is no longer a wdFS so we need to make sure not to use absolute
+ // paths - the path is relative to the root of the subfs
+ err = hackpadfs.WriteFullFile(subfs, "foo", []byte("hello world"), 0o600)
+ require.NoError(t, err)
+
+ b, err = fs.ReadFile(subfs, "foo")
+ require.NoError(t, err)
+ assert.Equal(t, "hello world", string(b))
}
func skipWindows(t *testing.T) {
@@ -160,6 +174,8 @@ func TestResolveLocalPath_NonWindows(t *testing.T) {
skipWindows(t)
wd, _ := os.Getwd()
+ fsys := &wdFS{vol: "/", fsys: osfs.NewFS()}
+
wd = wd[1:]
testdata := []struct {
@@ -176,7 +192,7 @@ func TestResolveLocalPath_NonWindows(t *testing.T) {
for _, td := range testdata {
td := td
t.Run(td.path, func(t *testing.T) {
- root, path, err := ResolveLocalPath(td.path)
+ root, path, err := ResolveLocalPath(fsys, td.path)
require.NoError(t, err)
assert.Equal(t, "/", root)
assert.Equal(t, td.expected, path)
@@ -189,9 +205,12 @@ func TestResolveLocalPath_Windows(t *testing.T) {
wd, _ := os.Getwd()
volname := filepath.VolumeName(wd)
- wd = wd[len(volname)+1:]
wd = filepath.ToSlash(wd)
+ fsys := &wdFS{vol: volname, fsys: osfs.NewFS()}
+
+ wd = wd[len(volname)+1:]
+
testdata := []struct {
path string
expRoot string
@@ -208,7 +227,7 @@ func TestResolveLocalPath_Windows(t *testing.T) {
for _, td := range testdata {
td := td
t.Run(td.path, func(t *testing.T) {
- root, path, err := ResolveLocalPath(td.path)
+ root, path, err := ResolveLocalPath(fsys, td.path)
require.NoError(t, err)
assert.Equal(t, td.expRoot, root)
assert.Equal(t, td.expected, path)
@@ -233,10 +252,8 @@ func TestWdFS_ResolveLocalPath_NonWindows(t *testing.T) {
{"/", "."},
}
- fsys := &wdFS{}
-
for _, td := range testdata {
- root, path, err := fsys.resolveLocalPath(td.path)
+ root, path, err := resolveLocalPath("/", td.path)
require.NoError(t, err)
assert.Equal(t, "/", root)
assert.Equal(t, td.expected, path)
@@ -248,8 +265,8 @@ func TestWdFS_ResolveLocalPath_Windows(t *testing.T) {
wd, _ := os.Getwd()
volname := filepath.VolumeName(wd)
- wd = wd[len(volname)+1:]
wd = filepath.ToSlash(wd)
+ wd = wd[len(volname)+1:]
testdata := []struct {
path string
@@ -268,12 +285,10 @@ func TestWdFS_ResolveLocalPath_Windows(t *testing.T) {
{`//somehost/share/foo/bar`, "//somehost/share", "foo/bar"},
}
- fsys := &wdFS{vol: volname}
-
for _, td := range testdata {
td := td
t.Run(td.path, func(t *testing.T) {
- root, path, err := fsys.resolveLocalPath(td.path)
+ root, path, err := resolveLocalPath(volname, td.path)
require.NoError(t, err)
assert.Equal(t, td.expRoot, root)
assert.Equal(t, td.expected, path)
diff --git a/internal/deprecated/deprecated.go b/internal/deprecated/deprecated.go
index c453ebbe..6f9ef317 100644
--- a/internal/deprecated/deprecated.go
+++ b/internal/deprecated/deprecated.go
@@ -2,6 +2,8 @@ package deprecated
import (
"context"
+ "fmt"
+ "log/slog"
"github.com/rs/zerolog"
)
@@ -10,5 +12,10 @@ import (
// datasources
func WarnDeprecated(ctx context.Context, msg string) {
logger := zerolog.Ctx(ctx)
+ if !logger.Warn().Enabled() {
+ // we'll flip to slog soon, but in the meantime if we don't have a
+ // logger in the context, just log it
+ slog.WarnContext(ctx, fmt.Sprintf("Deprecated: %s", msg))
+ }
logger.Warn().Msgf("Deprecated: %s", msg)
}
diff --git a/internal/iohelpers/mimetypes.go b/internal/iohelpers/mimetypes.go
new file mode 100644
index 00000000..f0678fe3
--- /dev/null
+++ b/internal/iohelpers/mimetypes.go
@@ -0,0 +1,33 @@
+package iohelpers
+
+import (
+ "mime"
+)
+
+const (
+ TextMimetype = "text/plain"
+ CSVMimetype = "text/csv"
+ JSONMimetype = "application/json"
+ JSONArrayMimetype = "application/array+json"
+ TOMLMimetype = "application/toml"
+ YAMLMimetype = "application/yaml"
+ EnvMimetype = "application/x-env"
+ CUEMimetype = "application/cue"
+)
+
+// mimeTypeAliases defines a mapping for non-canonical mime types that are
+// sometimes seen in the wild
+var mimeTypeAliases = map[string]string{
+ "application/x-yaml": YAMLMimetype,
+ "application/text": TextMimetype,
+}
+
+func MimeAlias(m string) string {
+ // normalize the type by removing any extra parameters
+ m, _, _ = mime.ParseMediaType(m)
+
+ if a, ok := mimeTypeAliases[m]; ok {
+ return a
+ }
+ return m
+}
diff --git a/internal/iohelpers/write_test.go b/internal/iohelpers/write_test.go
new file mode 100644
index 00000000..e11b583f
--- /dev/null
+++ b/internal/iohelpers/write_test.go
@@ -0,0 +1,71 @@
+// this is in a separate package so WriteFile can be more thoroughly tested
+// without involving an import cycle with datafs
+package iohelpers_test
+
+import (
+ "io/fs"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/hack-pad/hackpadfs"
+ osfs "github.com/hack-pad/hackpadfs/os"
+ "github.com/hairyhenderson/gomplate/v4/internal/datafs"
+ "github.com/hairyhenderson/gomplate/v4/internal/iohelpers"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ tfs "gotest.tools/v3/fs"
+)
+
+func TestWrite(t *testing.T) {
+ oldwd, _ := os.Getwd()
+ defer os.Chdir(oldwd)
+
+ rootDir := tfs.NewDir(t, "gomplate-test")
+ t.Cleanup(rootDir.Remove)
+
+ // we want to use a real filesystem here, so we can test interactions with
+ // the current working directory
+ fsys := datafs.WrapWdFS(osfs.NewFS())
+
+ newwd := rootDir.Join("the", "path", "we", "want")
+ badwd := rootDir.Join("some", "other", "dir")
+ hackpadfs.MkdirAll(fsys, newwd, 0o755)
+ hackpadfs.MkdirAll(fsys, badwd, 0o755)
+ newwd, _ = filepath.EvalSymlinks(newwd)
+ badwd, _ = filepath.EvalSymlinks(badwd)
+
+ err := os.Chdir(newwd)
+ require.NoError(t, err)
+
+ err = iohelpers.WriteFile(fsys, "/foo", []byte("Hello world"))
+ assert.Error(t, err)
+
+ rel, err := filepath.Rel(newwd, badwd)
+ require.NoError(t, err)
+ err = iohelpers.WriteFile(fsys, rel, []byte("Hello world"))
+ assert.Error(t, err)
+
+ foopath := filepath.Join(newwd, "foo")
+ err = iohelpers.WriteFile(fsys, foopath, []byte("Hello world"))
+ require.NoError(t, err)
+
+ out, err := fs.ReadFile(fsys, foopath)
+ require.NoError(t, err)
+ assert.Equal(t, "Hello world", string(out))
+
+ err = iohelpers.WriteFile(fsys, foopath, []byte("truncate"))
+ require.NoError(t, err)
+
+ out, err = fs.ReadFile(fsys, foopath)
+ require.NoError(t, err)
+ assert.Equal(t, "truncate", string(out))
+
+ foopath = filepath.Join(newwd, "nonexistant", "subdir", "foo")
+ err = iohelpers.WriteFile(fsys, foopath, []byte("Hello subdirranean world!"))
+ require.NoError(t, err)
+
+ out, err = fs.ReadFile(fsys, foopath)
+ require.NoError(t, err)
+ assert.Equal(t, "Hello subdirranean world!", string(out))
+}
diff --git a/internal/iohelpers/writers_test.go b/internal/iohelpers/writers_test.go
index 8a9c68e0..3dbce93b 100644
--- a/internal/iohelpers/writers_test.go
+++ b/internal/iohelpers/writers_test.go
@@ -4,17 +4,12 @@ import (
"bytes"
"fmt"
"io"
- "io/fs"
"os"
"path/filepath"
"testing"
- "github.com/hack-pad/hackpadfs"
- osfs "github.com/hack-pad/hackpadfs/os"
- "github.com/hairyhenderson/gomplate/v4/internal/datafs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- tfs "gotest.tools/v3/fs"
)
func TestAllWhitespace(t *testing.T) {
@@ -166,58 +161,59 @@ func TestLazyWriteCloser(t *testing.T) {
assert.Error(t, err)
}
-func TestWrite(t *testing.T) {
- oldwd, _ := os.Getwd()
- defer os.Chdir(oldwd)
+// TODO: uncomment this and fix the import cycle!
+// func TestWrite(t *testing.T) {
+// oldwd, _ := os.Getwd()
+// defer os.Chdir(oldwd)
- rootDir := tfs.NewDir(t, "gomplate-test")
- t.Cleanup(rootDir.Remove)
+// rootDir := tfs.NewDir(t, "gomplate-test")
+// t.Cleanup(rootDir.Remove)
- // we want to use a real filesystem here, so we can test interactions with
- // the current working directory
- fsys := datafs.WrapWdFS(osfs.NewFS())
+// // we want to use a real filesystem here, so we can test interactions with
+// // the current working directory
+// fsys := datafs.WrapWdFS(osfs.NewFS())
- newwd := rootDir.Join("the", "path", "we", "want")
- badwd := rootDir.Join("some", "other", "dir")
- hackpadfs.MkdirAll(fsys, newwd, 0o755)
- hackpadfs.MkdirAll(fsys, badwd, 0o755)
- newwd, _ = filepath.EvalSymlinks(newwd)
- badwd, _ = filepath.EvalSymlinks(badwd)
+// newwd := rootDir.Join("the", "path", "we", "want")
+// badwd := rootDir.Join("some", "other", "dir")
+// hackpadfs.MkdirAll(fsys, newwd, 0o755)
+// hackpadfs.MkdirAll(fsys, badwd, 0o755)
+// newwd, _ = filepath.EvalSymlinks(newwd)
+// badwd, _ = filepath.EvalSymlinks(badwd)
- err := os.Chdir(newwd)
- require.NoError(t, err)
+// err := os.Chdir(newwd)
+// require.NoError(t, err)
- err = WriteFile(fsys, "/foo", []byte("Hello world"))
- assert.Error(t, err)
+// err = WriteFile(fsys, "/foo", []byte("Hello world"))
+// assert.Error(t, err)
- rel, err := filepath.Rel(newwd, badwd)
- require.NoError(t, err)
- err = WriteFile(fsys, rel, []byte("Hello world"))
- assert.Error(t, err)
+// rel, err := filepath.Rel(newwd, badwd)
+// require.NoError(t, err)
+// err = WriteFile(fsys, rel, []byte("Hello world"))
+// assert.Error(t, err)
- foopath := filepath.Join(newwd, "foo")
- err = WriteFile(fsys, foopath, []byte("Hello world"))
- require.NoError(t, err)
+// foopath := filepath.Join(newwd, "foo")
+// err = WriteFile(fsys, foopath, []byte("Hello world"))
+// require.NoError(t, err)
- out, err := fs.ReadFile(fsys, foopath)
- require.NoError(t, err)
- assert.Equal(t, "Hello world", string(out))
+// out, err := fs.ReadFile(fsys, foopath)
+// require.NoError(t, err)
+// assert.Equal(t, "Hello world", string(out))
- err = WriteFile(fsys, foopath, []byte("truncate"))
- require.NoError(t, err)
+// err = WriteFile(fsys, foopath, []byte("truncate"))
+// require.NoError(t, err)
- out, err = fs.ReadFile(fsys, foopath)
- require.NoError(t, err)
- assert.Equal(t, "truncate", string(out))
+// out, err = fs.ReadFile(fsys, foopath)
+// require.NoError(t, err)
+// assert.Equal(t, "truncate", string(out))
- foopath = filepath.Join(newwd, "nonexistant", "subdir", "foo")
- err = WriteFile(fsys, foopath, []byte("Hello subdirranean world!"))
- require.NoError(t, err)
+// foopath = filepath.Join(newwd, "nonexistant", "subdir", "foo")
+// err = WriteFile(fsys, foopath, []byte("Hello subdirranean world!"))
+// require.NoError(t, err)
- out, err = fs.ReadFile(fsys, foopath)
- require.NoError(t, err)
- assert.Equal(t, "Hello subdirranean world!", string(out))
-}
+// out, err = fs.ReadFile(fsys, foopath)
+// require.NoError(t, err)
+// assert.Equal(t, "Hello subdirranean world!", string(out))
+// }
func TestAssertPathInWD(t *testing.T) {
oldwd, _ := os.Getwd()
diff --git a/internal/parsers/parsefuncs.go b/internal/parsers/parsefuncs.go
new file mode 100644
index 00000000..c16f86e7
--- /dev/null
+++ b/internal/parsers/parsefuncs.go
@@ -0,0 +1,528 @@
+// Package parsers has internal parsers for various formats.
+package parsers
+
+import (
+ "bytes"
+ "encoding/csv"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "cuelang.org/go/cue"
+ "cuelang.org/go/cue/cuecontext"
+ "cuelang.org/go/cue/format"
+ "github.com/Shopify/ejson"
+ ejsonJson "github.com/Shopify/ejson/json"
+ "github.com/hairyhenderson/gomplate/v4/conv"
+ "github.com/joho/godotenv"
+
+ // XXX: replace once https://github.com/BurntSushi/toml/pull/179 is merged
+ "github.com/hairyhenderson/toml"
+ "github.com/ugorji/go/codec"
+
+ "github.com/hairyhenderson/yaml"
+)
+
+func unmarshalObj(obj map[string]interface{}, in string, f func([]byte, interface{}) error) (map[string]interface{}, error) {
+ err := f([]byte(in), &obj)
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal object %s: %w", in, err)
+ }
+ return obj, nil
+}
+
+func unmarshalArray(obj []interface{}, in string, f func([]byte, interface{}) error) ([]interface{}, error) {
+ err := f([]byte(in), &obj)
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal array %s: %w", in, err)
+ }
+ return obj, nil
+}
+
+// JSON - Unmarshal a JSON Object. Can be ejson-encrypted.
+func JSON(in string) (map[string]interface{}, error) {
+ obj := make(map[string]interface{})
+ out, err := unmarshalObj(obj, in, yaml.Unmarshal)
+ if err != nil {
+ return out, err
+ }
+
+ _, ok := out[ejsonJson.PublicKeyField]
+ if ok {
+ out, err = decryptEJSON(in)
+ }
+ return out, err
+}
+
+// decryptEJSON - decrypts an ejson input, and unmarshals it, stripping the _public_key field.
+func decryptEJSON(in string) (map[string]interface{}, error) {
+ keyDir := getenv("EJSON_KEYDIR", "/opt/ejson/keys")
+ key := getenv("EJSON_KEY")
+
+ rIn := bytes.NewBufferString(in)
+ rOut := &bytes.Buffer{}
+ err := ejson.Decrypt(rIn, rOut, keyDir, key)
+ if err != nil {
+ return nil, err
+ }
+ obj := make(map[string]interface{})
+ out, err := unmarshalObj(obj, rOut.String(), yaml.Unmarshal)
+ if err != nil {
+ return nil, err
+ }
+ delete(out, ejsonJson.PublicKeyField)
+ return out, nil
+}
+
+// a reimplementation of env.Getenv to avoid import cycles
+func getenv(key string, def ...string) string {
+ val := os.Getenv(key)
+ if val != "" {
+ return val
+ }
+
+ p := os.Getenv(key + "_FILE")
+ if p != "" {
+ b, err := os.ReadFile(p)
+ if err != nil {
+ return ""
+ }
+
+ val = strings.TrimSpace(string(b))
+ }
+
+ if val == "" && len(def) > 0 {
+ return def[0]
+ }
+
+ return val
+}
+
+// JSONArray - Unmarshal a JSON Array
+func JSONArray(in string) ([]interface{}, error) {
+ obj := make([]interface{}, 1)
+ return unmarshalArray(obj, in, yaml.Unmarshal)
+}
+
+// YAML - Unmarshal a YAML Object
+func YAML(in string) (map[string]interface{}, error) {
+ obj := make(map[string]interface{})
+ s := strings.NewReader(in)
+ d := yaml.NewDecoder(s)
+ for {
+ err := d.Decode(&obj)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ if obj != nil {
+ break
+ }
+ }
+
+ err := stringifyYAMLMapMapKeys(obj)
+ return obj, err
+}
+
+// YAMLArray - Unmarshal a YAML Array
+func YAMLArray(in string) ([]interface{}, error) {
+ obj := make([]interface{}, 1)
+ s := strings.NewReader(in)
+ d := yaml.NewDecoder(s)
+ for {
+ err := d.Decode(&obj)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ if obj != nil {
+ break
+ }
+ }
+ err := stringifyYAMLArrayMapKeys(obj)
+ return obj, err
+}
+
+// stringifyYAMLArrayMapKeys recurses into the input array and changes all
+// non-string map keys to string map keys. Modifies the input array.
+func stringifyYAMLArrayMapKeys(in []interface{}) error {
+ if _, changed := stringifyMapKeys(in); changed {
+ return fmt.Errorf("stringifyYAMLArrayMapKeys: output type did not match input type, this should be impossible")
+ }
+ return nil
+}
+
+// stringifyYAMLMapMapKeys recurses into the input map and changes all
+// non-string map keys to string map keys. Modifies the input map.
+func stringifyYAMLMapMapKeys(in map[string]interface{}) error {
+ if _, changed := stringifyMapKeys(in); changed {
+ return fmt.Errorf("stringifyYAMLMapMapKeys: output type did not match input type, this should be impossible")
+ }
+ return nil
+}
+
+// stringifyMapKeys recurses into in and changes all instances of
+// map[interface{}]interface{} to map[string]interface{}. This is useful to
+// work around the impedance mismatch between JSON and YAML unmarshaling that's
+// described here: https://github.com/go-yaml/yaml/issues/139
+//
+// Taken and modified from https://github.com/gohugoio/hugo/blob/cdfd1c99baa22d69e865294dfcd783811f96c880/parser/metadecoders/decoder.go#L257, Apache License 2.0
+// Originally inspired by https://github.com/stripe/stripe-mock/blob/24a2bb46a49b2a416cfea4150ab95781f69ee145/mapstr.go#L13, MIT License
+func stringifyMapKeys(in interface{}) (interface{}, bool) {
+ switch in := in.(type) {
+ case []interface{}:
+ for i, v := range in {
+ if vv, replaced := stringifyMapKeys(v); replaced {
+ in[i] = vv
+ }
+ }
+ case map[string]interface{}:
+ for k, v := range in {
+ if vv, changed := stringifyMapKeys(v); changed {
+ in[k] = vv
+ }
+ }
+ case map[interface{}]interface{}:
+ res := make(map[string]interface{})
+
+ for k, v := range in {
+ ks := conv.ToString(k)
+ if vv, replaced := stringifyMapKeys(v); replaced {
+ res[ks] = vv
+ } else {
+ res[ks] = v
+ }
+ }
+ return res, true
+ }
+
+ return nil, false
+}
+
+// TOML - Unmarshal a TOML Object
+func TOML(in string) (interface{}, error) {
+ obj := make(map[string]interface{})
+ return unmarshalObj(obj, in, toml.Unmarshal)
+}
+
+// DotEnv - Unmarshal a dotenv file
+func DotEnv(in string) (interface{}, error) {
+ env, err := godotenv.Unmarshal(in)
+ if err != nil {
+ return nil, err
+ }
+ out := make(map[string]interface{})
+ for k, v := range env {
+ out[k] = v
+ }
+ return out, nil
+}
+
+func parseCSV(args ...string) ([][]string, []string, error) {
+ in, delim, hdr := csvParseArgs(args...)
+ c := csv.NewReader(strings.NewReader(in))
+ c.Comma = rune(delim[0])
+ records, err := c.ReadAll()
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(records) > 0 {
+ if hdr == nil {
+ hdr = records[0]
+ records = records[1:]
+ } else if len(hdr) == 0 {
+ hdr = make([]string, len(records[0]))
+ for i := range hdr {
+ hdr[i] = autoIndex(i)
+ }
+ }
+ }
+ return records, hdr, nil
+}
+
+func csvParseArgs(args ...string) (in, delim string, hdr []string) {
+ delim = ","
+ switch len(args) {
+ case 1:
+ in = args[0]
+ case 2:
+ in = args[1]
+ switch len(args[0]) {
+ case 1:
+ delim = args[0]
+ case 0:
+ hdr = []string{}
+ default:
+ hdr = strings.Split(args[0], delim)
+ }
+ case 3:
+ delim = args[0]
+ hdr = strings.Split(args[1], delim)
+ in = args[2]
+ }
+ return in, delim, hdr
+}
+
+// autoIndex - calculates a default string column name given a numeric value
+func autoIndex(i int) string {
+ s := &strings.Builder{}
+ for n := 0; n <= i/26; n++ {
+ s.WriteRune('A' + rune(i%26))
+ }
+ return s.String()
+}
+
+// CSV - Unmarshal CSV
+// parameters:
+//
+// delim - (optional) the (single-character!) field delimiter, defaults to ","
+// in - the CSV-format string to parse
+//
+// returns:
+//
+// an array of rows, which are arrays of cells (strings)
+func CSV(args ...string) ([][]string, error) {
+ records, hdr, err := parseCSV(args...)
+ if err != nil {
+ return nil, err
+ }
+ records = append(records, nil)
+ copy(records[1:], records)
+ records[0] = hdr
+ return records, nil
+}
+
+// CSVByRow - Unmarshal CSV in a row-oriented form
+// parameters:
+//
+// delim - (optional) the (single-character!) field delimiter, defaults to ","
+// hdr - (optional) list of column names separated by `delim`,
+// set to "" to get auto-named columns (A-Z), omit
+// to use the first line
+// in - the CSV-format string to parse
+//
+// returns:
+//
+// an array of rows, indexed by the header name
+func CSVByRow(args ...string) (rows []map[string]string, err error) {
+ records, hdr, err := parseCSV(args...)
+ if err != nil {
+ return nil, err
+ }
+ for _, record := range records {
+ m := make(map[string]string)
+ for i, v := range record {
+ m[hdr[i]] = v
+ }
+ rows = append(rows, m)
+ }
+ return rows, nil
+}
+
+// CSVByColumn - Unmarshal CSV in a Columnar form
+// parameters:
+//
+// delim - (optional) the (single-character!) field delimiter, defaults to ","
+// hdr - (optional) list of column names separated by `delim`,
+// set to "" to get auto-named columns (A-Z), omit
+// to use the first line
+// in - the CSV-format string to parse
+//
+// returns:
+//
+// a map of columns, indexed by the header name. values are arrays of strings
+func CSVByColumn(args ...string) (cols map[string][]string, err error) {
+ records, hdr, err := parseCSV(args...)
+ if err != nil {
+ return nil, err
+ }
+ cols = make(map[string][]string)
+ for _, record := range records {
+ for i, v := range record {
+ cols[hdr[i]] = append(cols[hdr[i]], v)
+ }
+ }
+ return cols, nil
+}
+
+// ToCSV -
+func ToCSV(args ...interface{}) (string, error) {
+ delim := ","
+ var in [][]string
+ if len(args) == 2 {
+ var ok bool
+ delim, ok = args[0].(string)
+ if !ok {
+ return "", fmt.Errorf("can't parse ToCSV delimiter (%v) - must be string (is a %T)", args[0], args[0])
+ }
+ args = args[1:]
+ }
+ if len(args) == 1 {
+ switch a := args[0].(type) {
+ case [][]string:
+ in = a
+ case [][]interface{}:
+ in = make([][]string, len(a))
+ for i, v := range a {
+ in[i] = conv.ToStrings(v...)
+ }
+ case []interface{}:
+ in = make([][]string, len(a))
+ for i, v := range a {
+ ar, ok := v.([]interface{})
+ if !ok {
+ return "", fmt.Errorf("can't parse ToCSV input - must be a two-dimensional array (like [][]string or [][]interface{}) (was %T)", args[0])
+ }
+ in[i] = conv.ToStrings(ar...)
+ }
+ default:
+ return "", fmt.Errorf("can't parse ToCSV input - must be a two-dimensional array (like [][]string or [][]interface{}) (was %T)", args[0])
+ }
+ }
+ b := &bytes.Buffer{}
+ c := csv.NewWriter(b)
+ c.Comma = rune(delim[0])
+ // We output RFC4180 CSV, so force this to CRLF
+ c.UseCRLF = true
+ err := c.WriteAll(in)
+ if err != nil {
+ return "", err
+ }
+ return b.String(), nil
+}
+
+func marshalObj(obj interface{}, f func(interface{}) ([]byte, error)) (string, error) {
+ b, err := f(obj)
+ if err != nil {
+ return "", fmt.Errorf("unable to marshal object %s: %w", obj, err)
+ }
+
+ return string(b), nil
+}
+
+func toJSONBytes(in interface{}) ([]byte, error) {
+ h := &codec.JsonHandle{}
+ h.Canonical = true
+ buf := new(bytes.Buffer)
+ err := codec.NewEncoder(buf, h).Encode(in)
+ if err != nil {
+ return nil, fmt.Errorf("unable to marshal %s: %w", in, err)
+ }
+ return buf.Bytes(), nil
+}
+
+// ToJSON - Stringify a struct as JSON
+func ToJSON(in interface{}) (string, error) {
+ s, err := toJSONBytes(in)
+ if err != nil {
+ return "", err
+ }
+ return string(s), nil
+}
+
+// ToJSONPretty - Stringify a struct as JSON (indented)
+func ToJSONPretty(indent string, in interface{}) (string, error) {
+ out := new(bytes.Buffer)
+ b, err := toJSONBytes(in)
+ if err != nil {
+ return "", err
+ }
+ err = json.Indent(out, b, "", indent)
+ if err != nil {
+ return "", fmt.Errorf("unable to indent JSON %s: %w", b, err)
+ }
+
+ return out.String(), nil
+}
+
+// ToYAML - Stringify a struct as YAML
+func ToYAML(in interface{}) (string, error) {
+ // I'd use yaml.Marshal, but between v2 and v3 the indent has changed from
+ // 2 to 4. This explicitly sets it back to 2.
+ marshal := func(in interface{}) (out []byte, err error) {
+ buf := &bytes.Buffer{}
+ e := yaml.NewEncoder(buf)
+ e.SetIndent(2)
+ defer e.Close()
+ err = e.Encode(in)
+ return buf.Bytes(), err
+ }
+
+ return marshalObj(in, marshal)
+}
+
+// ToTOML - Stringify a struct as TOML
+func ToTOML(in interface{}) (string, error) {
+ buf := new(bytes.Buffer)
+ err := toml.NewEncoder(buf).Encode(in)
+ if err != nil {
+ return "", fmt.Errorf("unable to marshal %s: %w", in, err)
+ }
+ return buf.String(), nil
+}
+
+// CUE - Unmarshal a CUE expression into the appropriate type
+func CUE(in string) (interface{}, error) {
+ cuectx := cuecontext.New()
+ val := cuectx.CompileString(in)
+
+ if val.Err() != nil {
+ return nil, fmt.Errorf("unable to process CUE: %w", val.Err())
+ }
+
+ switch val.Kind() {
+ case cue.StructKind:
+ out := map[string]interface{}{}
+ err := val.Decode(&out)
+ return out, err
+ case cue.ListKind:
+ out := []interface{}{}
+ err := val.Decode(&out)
+ return out, err
+ case cue.BytesKind:
+ out := []byte{}
+ err := val.Decode(&out)
+ return out, err
+ case cue.StringKind:
+ out := ""
+ err := val.Decode(&out)
+ return out, err
+ case cue.IntKind:
+ out := 0
+ err := val.Decode(&out)
+ return out, err
+ case cue.NumberKind, cue.FloatKind:
+ out := 0.0
+ err := val.Decode(&out)
+ return out, err
+ case cue.BoolKind:
+ out := false
+ err := val.Decode(&out)
+ return out, err
+ case cue.NullKind:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("unsupported CUE type %q", val.Kind())
+ }
+}
+
+func ToCUE(in interface{}) (string, error) {
+ cuectx := cuecontext.New()
+ v := cuectx.Encode(in)
+ if v.Err() != nil {
+ return "", v.Err()
+ }
+
+ bs, err := format.Node(v.Syntax())
+ if err != nil {
+ return "", err
+ }
+
+ return string(bs), nil
+}
diff --git a/internal/parsers/parsefuncs_test.go b/internal/parsers/parsefuncs_test.go
new file mode 100644
index 00000000..caf83fba
--- /dev/null
+++ b/internal/parsers/parsefuncs_test.go
@@ -0,0 +1,777 @@
+package parsers
+
+import (
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/ugorji/go/codec"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "gotest.tools/v3/fs"
+)
+
+func TestUnmarshalObj(t *testing.T) {
+ expected := map[string]interface{}{
+ "foo": map[string]interface{}{"bar": "baz"},
+ "one": 1.0,
+ "true": true,
+ "escaped": "\"/\\\b\f\n\r\t∞",
+ }
+
+ test := func(actual map[string]interface{}, err error) {
+ t.Helper()
+ require.NoError(t, err)
+ assert.Equal(t, expected["foo"], actual["foo"], "foo")
+ assert.Equal(t, expected["one"], actual["one"], "one")
+ assert.Equal(t, expected["true"], actual["true"], "true")
+ assert.Equal(t, expected["escaped"], actual["escaped"], "escaped")
+ }
+ test(JSON(`{"foo":{"bar":"baz"},"one":1.0,"true":true,"escaped":"\"\/\\\b\f\n\r\t\u221e"}`))
+ test(YAML(`foo:
+ bar: baz
+one: 1.0
+'true': true
+escaped: "\"\/\\\b\f\n\r\t\u221e"
+`))
+ test(YAML(`anchor: &anchor
+ bar: baz
+foo:
+ <<: *anchor
+one: 1.0
+'true': true
+escaped: "\"\/\\\b\f\n\r\t\u221e"
+`))
+ test(YAML(`# this comment marks an empty (nil!) document
+---
+# this one too, for good measure
+---
+foo:
+ bar: baz
+one: 1.0
+'true': true
+escaped: "\"\/\\\b\f\n\r\t\u221e"
+`))
+
+ obj := make(map[string]interface{})
+ _, err := unmarshalObj(obj, "SOMETHING", func(in []byte, out interface{}) error {
+ return fmt.Errorf("fail")
+ })
+ assert.EqualError(t, err, "unable to unmarshal object SOMETHING: fail")
+}
+
+func TestUnmarshalArray(t *testing.T) {
+ expected := []interface{}{
+ "foo", "bar",
+ map[string]interface{}{
+ "baz": map[string]interface{}{"qux": true},
+ "quux": map[string]interface{}{"42": 18},
+ "corge": map[string]interface{}{"false": "blah"},
+ },
+ }
+
+ test := func(actual []interface{}, err error) {
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, actual)
+ }
+ test(JSONArray(`["foo","bar",{"baz":{"qux": true},"quux":{"42":18},"corge":{"false":"blah"}}]`))
+ test(YAMLArray(`
+- foo
+- bar
+- baz:
+ qux: true
+ quux:
+ "42": 18
+ corge:
+ "false": blah
+`))
+ test(YAMLArray(`---
+# blah blah blah ignore this!
+---
+- foo
+- bar
+- baz:
+ qux: true
+ quux:
+ "42": 18
+ corge:
+ "false": blah
+---
+this shouldn't be reached
+`))
+
+ actual, err := YAMLArray(`---
+- foo: &foo
+ bar: baz
+- qux:
+ <<: *foo
+ quux: corge
+- baz:
+ qux: true
+ 42: 18
+ false: blah
+`)
+ require.NoError(t, err)
+ assert.EqualValues(t,
+ []interface{}{
+ map[string]interface{}{
+ "foo": map[string]interface{}{
+ "bar": "baz",
+ },
+ },
+ map[string]interface{}{
+ "qux": map[string]interface{}{
+ "bar": "baz",
+ "quux": "corge",
+ },
+ },
+ map[string]interface{}{
+ "baz": map[string]interface{}{
+ "qux": true,
+ "42": 18,
+ "false": "blah",
+ },
+ },
+ },
+ actual)
+
+ obj := make([]interface{}, 1)
+ _, err = unmarshalArray(obj, "SOMETHING", func(in []byte, out interface{}) error {
+ return fmt.Errorf("fail")
+ })
+ assert.EqualError(t, err, "unable to unmarshal array SOMETHING: fail")
+}
+
+func TestMarshalObj(t *testing.T) {
+ expected := "foo"
+ actual, err := marshalObj(nil, func(in interface{}) ([]byte, error) {
+ return []byte("foo"), nil
+ })
+ require.NoError(t, err)
+ assert.Equal(t, expected, actual)
+ _, err = marshalObj(nil, func(in interface{}) ([]byte, error) {
+ return nil, fmt.Errorf("fail")
+ })
+ assert.Error(t, err)
+}
+
+func TestToJSONBytes(t *testing.T) {
+ expected := []byte("null")
+ actual, err := toJSONBytes(nil)
+ require.NoError(t, err)
+ assert.Equal(t, expected, actual)
+
+ _, err = toJSONBytes(&badObject{})
+ assert.Error(t, err)
+}
+
+type badObject struct{}
+
+func (b *badObject) CodecEncodeSelf(_ *codec.Encoder) {
+ panic("boom")
+}
+
+func (b *badObject) CodecDecodeSelf(_ *codec.Decoder) {
+}
+
+func TestToJSON(t *testing.T) {
+ expected := `{"down":{"the":{"rabbit":{"hole":true}}},"foo":"bar","one":1,"true":true}`
+ in := map[string]interface{}{
+ "foo": "bar",
+ "one": 1,
+ "true": true,
+ "down": map[interface{}]interface{}{
+ "the": map[interface{}]interface{}{
+ "rabbit": map[interface{}]interface{}{
+ "hole": true,
+ },
+ },
+ },
+ }
+ out, err := ToJSON(in)
+ require.NoError(t, err)
+ assert.Equal(t, expected, out)
+
+ _, err = ToJSON(&badObject{})
+ assert.Error(t, err)
+}
+
+func TestToJSONPretty(t *testing.T) {
+ expected := `{
+ "down": {
+ "the": {
+ "rabbit": {
+ "hole": true
+ }
+ }
+ },
+ "foo": "bar",
+ "one": 1,
+ "true": true
+}`
+ in := map[string]interface{}{
+ "foo": "bar",
+ "one": 1,
+ "true": true,
+ "down": map[string]interface{}{
+ "the": map[string]interface{}{
+ "rabbit": map[string]interface{}{
+ "hole": true,
+ },
+ },
+ },
+ }
+ out, err := ToJSONPretty(" ", in)
+ require.NoError(t, err)
+ assert.Equal(t, expected, out)
+
+ _, err = ToJSONPretty(" ", &badObject{})
+ assert.Error(t, err)
+}
+
+func TestToYAML(t *testing.T) {
+ expected := `d: 2006-01-02T15:04:05.999999999-07:00
+foo: bar
+? |-
+ multi
+ line
+ key
+: hello: world
+one: 1
+"true": true
+`
+ mst, _ := time.LoadLocation("MST")
+ in := map[string]interface{}{
+ "foo": "bar",
+ "one": 1,
+ "true": true,
+ `multi
+line
+key`: map[string]interface{}{
+ "hello": "world",
+ },
+ "d": time.Date(2006, time.January, 2, 15, 4, 5, 999999999, mst),
+ }
+ out, err := ToYAML(in)
+ require.NoError(t, err)
+ assert.Equal(t, expected, out)
+}
+
+func TestCSV(t *testing.T) {
+ expected := [][]string{
+ {"first", "second", "third"},
+ {"1", "2", "3"},
+ {"4", "5", "6"},
+ }
+ testdata := []struct {
+ args []string
+ out [][]string
+ }{
+ {[]string{"first,second,third\n1,2,3\n4,5,6"}, expected},
+ {[]string{";", "first;second;third\r\n1;2;3\r\n4;5;6\r\n"}, expected},
+
+ {[]string{""}, [][]string{nil}},
+ {[]string{"\n"}, [][]string{nil}},
+ {[]string{"foo"}, [][]string{{"foo"}}},
+ }
+ for _, d := range testdata {
+ out, err := CSV(d.args...)
+ require.NoError(t, err)
+ assert.Equal(t, d.out, out)
+ }
+}
+
+func TestCSVByRow(t *testing.T) {
+ in := "first,second,third\n1,2,3\n4,5,6"
+ expected := []map[string]string{
+ {
+ "first": "1",
+ "second": "2",
+ "third": "3",
+ },
+ {
+ "first": "4",
+ "second": "5",
+ "third": "6",
+ },
+ }
+ testdata := []struct {
+ args []string
+ out []map[string]string
+ }{
+ {[]string{in}, expected},
+ {[]string{"first,second,third", "1,2,3\n4,5,6"}, expected},
+ {[]string{";", "first;second;third", "1;2;3\n4;5;6"}, expected},
+ {[]string{";", "first;second;third\r\n1;2;3\r\n4;5;6"}, expected},
+ {[]string{"", "1,2,3\n4,5,6"}, []map[string]string{
+ {"A": "1", "B": "2", "C": "3"},
+ {"A": "4", "B": "5", "C": "6"},
+ }},
+ {[]string{"", "1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1"}, []map[string]string{
+ {"A": "1", "B": "1", "C": "1", "D": "1", "E": "1", "F": "1", "G": "1", "H": "1", "I": "1", "J": "1", "K": "1", "L": "1", "M": "1", "N": "1", "O": "1", "P": "1", "Q": "1", "R": "1", "S": "1", "T": "1", "U": "1", "V": "1", "W": "1", "X": "1", "Y": "1", "Z": "1", "AA": "1", "BB": "1", "CC": "1", "DD": "1"},
+ }},
+ }
+ for _, d := range testdata {
+ out, err := CSVByRow(d.args...)
+ require.NoError(t, err)
+ assert.Equal(t, d.out, out)
+ }
+}
+
+func TestCSVByColumn(t *testing.T) {
+ expected := map[string][]string{
+ "first": {"1", "4"},
+ "second": {"2", "5"},
+ "third": {"3", "6"},
+ }
+
+ testdata := []struct {
+ out map[string][]string
+ args []string
+ }{
+ {expected, []string{"first,second,third\n1,2,3\n4,5,6"}},
+ {expected, []string{"first,second,third", "1,2,3\n4,5,6"}},
+ {expected, []string{";", "first;second;third", "1;2;3\n4;5;6"}},
+ {expected, []string{";", "first;second;third\r\n1;2;3\r\n4;5;6"}},
+ {map[string][]string{
+ "A": {"1", "4"},
+ "B": {"2", "5"},
+ "C": {"3", "6"},
+ }, []string{"", "1,2,3\n4,5,6"}},
+ }
+ for _, d := range testdata {
+ out, err := CSVByColumn(d.args...)
+ require.NoError(t, err)
+ assert.Equal(t, d.out, out)
+ }
+}
+
+func TestAutoIndex(t *testing.T) {
+ assert.Equal(t, "A", autoIndex(0))
+ assert.Equal(t, "B", autoIndex(1))
+ assert.Equal(t, "Z", autoIndex(25))
+ assert.Equal(t, "AA", autoIndex(26))
+ assert.Equal(t, "ZZ", autoIndex(51))
+ assert.Equal(t, "AAA", autoIndex(52))
+ assert.Equal(t, "YYYYY", autoIndex(128))
+}
+
+func TestToCSV(t *testing.T) {
+ in := [][]string{
+ {"first", "second", "third"},
+ {"1", "2", "3"},
+ {"4", "5", "6"},
+ }
+ expected := "first,second,third\r\n1,2,3\r\n4,5,6\r\n"
+
+ out, err := ToCSV(in)
+ require.NoError(t, err)
+ assert.Equal(t, expected, out)
+
+ expected = "first;second;third\r\n1;2;3\r\n4;5;6\r\n"
+
+ out, err = ToCSV(";", in)
+ require.NoError(t, err)
+ assert.Equal(t, expected, out)
+
+ _, err = ToCSV(42, [][]int{{1, 2}})
+ assert.Error(t, err)
+
+ _, err = ToCSV([][]int{{1, 2}})
+ assert.Error(t, err)
+
+ expected = "first,second,third\r\n1,2,3\r\n4,5,6\r\n"
+ out, err = ToCSV([][]interface{}{
+ {"first", "second", "third"},
+ {"1", "2", "3"},
+ {"4", "5", "6"},
+ })
+ require.NoError(t, err)
+ assert.Equal(t, expected, out)
+
+ expected = "first|second|third\r\n1|2|3\r\n4|5|6\r\n"
+ out, err = ToCSV("|", []interface{}{
+ []interface{}{"first", "second", "third"},
+ []interface{}{1, "2", 3},
+ []interface{}{"4", 5, "6"},
+ })
+ require.NoError(t, err)
+ assert.Equal(t, expected, out)
+}
+
+func TestTOML(t *testing.T) {
+ in := `# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+ "alpha",
+ "omega"
+]
+`
+ expected := map[string]interface{}{
+ "title": "TOML Example",
+ "owner": map[string]interface{}{
+ "name": "Tom Preston-Werner",
+ "organization": "GitHub",
+ "bio": "GitHub Cofounder & CEO\nLikes tater tots and beer.",
+ "dob": time.Date(1979, time.May, 27, 7, 32, 0, 0, time.UTC),
+ },
+ "database": map[string]interface{}{
+ "server": "192.168.1.1",
+ "ports": []interface{}{int64(8001), int64(8001), int64(8002)},
+ "connection_max": int64(5000),
+ "enabled": true,
+ },
+ "servers": map[string]interface{}{
+ "alpha": map[string]interface{}{
+ "ip": "10.0.0.1",
+ "dc": "eqdc10",
+ },
+ "beta": map[string]interface{}{
+ "ip": "10.0.0.2",
+ "dc": "eqdc10",
+ },
+ },
+ "clients": map[string]interface{}{
+ "data": []interface{}{
+ []interface{}{"gamma", "delta"},
+ []interface{}{int64(1), int64(2)},
+ },
+ "hosts": []interface{}{"alpha", "omega"},
+ },
+ }
+
+ out, err := TOML(in)
+ require.NoError(t, err)
+ assert.Equal(t, expected, out)
+}
+
+func TestToTOML(t *testing.T) {
+ expected := `foo = "bar"
+one = 1
+true = true
+
+[down]
+ [down.the]
+ [down.the.rabbit]
+ hole = true
+`
+ in := map[string]interface{}{
+ "foo": "bar",
+ "one": 1,
+ "true": true,
+ "down": map[interface{}]interface{}{
+ "the": map[interface{}]interface{}{
+ "rabbit": map[interface{}]interface{}{
+ "hole": true,
+ },
+ },
+ },
+ }
+ out, err := ToTOML(in)
+ require.NoError(t, err)
+ assert.Equal(t, expected, out)
+}
+
+func TestDecryptEJSON(t *testing.T) {
+ privateKey := "e282d979654f88267f7e6c2d8268f1f4314b8673579205ed0029b76de9c8223f"
+ publicKey := "6e05ec625bcdca34864181cc43e6fcc20a57732a453bc2f4a2e117ffdf1a6762"
+ expected := map[string]interface{}{
+ "password": "supersecret",
+ "_unencrypted": "notsosecret",
+ }
+ in := `{
+ "_public_key": "` + publicKey + `",
+ "password": "EJ[1:yJ7n4UorqxkJZMoKevIA1dJeDvaQhkbgENIVZW18jig=:0591iW+paVSh4APOytKBVW/ZcxHO/5wO:TssnpVtkiXmpDIxPlXSiYdgnWyd44stGcwG1]",
+ "_unencrypted": "notsosecret"
+ }`
+
+ t.Setenv("EJSON_KEY", privateKey)
+ actual, err := decryptEJSON(in)
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, actual)
+
+ actual, err = JSON(in)
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, actual)
+
+ tmpDir := fs.NewDir(t, "gomplate-ejsontest",
+ fs.WithFile(publicKey, privateKey),
+ )
+ t.Cleanup(tmpDir.Remove)
+
+ os.Unsetenv("EJSON_KEY")
+ t.Setenv("EJSON_KEY_FILE", tmpDir.Join(publicKey))
+ actual, err = decryptEJSON(in)
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, actual)
+
+ os.Unsetenv("EJSON_KEY")
+ os.Unsetenv("EJSON_KEY_FILE")
+ t.Setenv("EJSON_KEYDIR", tmpDir.Path())
+ actual, err = decryptEJSON(in)
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, actual)
+}
+
+func TestDotEnv(t *testing.T) {
+ in := `FOO=a regular unquoted value
+export BAR=another value, exports are ignored
+
+# comments are totally ignored, as are blank lines
+FOO.BAR = "values can be double-quoted, and shell\nescapes are supported"
+
+BAZ = "variable expansion: ${FOO}"
+QUX='single quotes ignore $variables'
+`
+ expected := map[string]interface{}{
+ "FOO": "a regular unquoted value",
+ "BAR": "another value, exports are ignored",
+ "FOO.BAR": "values can be double-quoted, and shell\nescapes are supported",
+ "BAZ": "variable expansion: a regular unquoted value",
+ "QUX": "single quotes ignore $variables",
+ }
+ out, err := DotEnv(in)
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, out)
+}
+
+func TestStringifyYAMLArrayMapKeys(t *testing.T) {
+ cases := []struct {
+ input []interface{}
+ want []interface{}
+ replaced bool
+ }{
+ {
+ []interface{}{map[interface{}]interface{}{"a": 1, "b": 2}},
+ []interface{}{map[string]interface{}{"a": 1, "b": 2}},
+ false,
+ },
+ {
+ []interface{}{map[interface{}]interface{}{"a": []interface{}{1, map[interface{}]interface{}{"b": 2}}}},
+ []interface{}{map[string]interface{}{"a": []interface{}{1, map[string]interface{}{"b": 2}}}},
+ false,
+ },
+ {
+ []interface{}{map[interface{}]interface{}{true: 1, "b": false}},
+ []interface{}{map[string]interface{}{"true": 1, "b": false}},
+ false,
+ },
+ {
+ []interface{}{map[interface{}]interface{}{1: "a", 2: "b"}},
+ []interface{}{map[string]interface{}{"1": "a", "2": "b"}},
+ false,
+ },
+ {
+ []interface{}{map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": 1}}},
+ []interface{}{map[string]interface{}{"a": map[string]interface{}{"b": 1}}},
+ false,
+ },
+ {
+ []interface{}{map[string]interface{}{"a": map[string]interface{}{"b": 1}}},
+ []interface{}{map[string]interface{}{"a": map[string]interface{}{"b": 1}}},
+ false,
+ },
+ {
+ []interface{}{map[interface{}]interface{}{1: "a", 2: "b"}},
+ []interface{}{map[string]interface{}{"1": "a", "2": "b"}},
+ false,
+ },
+ }
+
+ for _, c := range cases {
+ err := stringifyYAMLArrayMapKeys(c.input)
+ require.NoError(t, err)
+ assert.EqualValues(t, c.want, c.input)
+ }
+}
+
+func TestStringifyYAMLMapMapKeys(t *testing.T) {
+ cases := []struct {
+ input map[string]interface{}
+ want map[string]interface{}
+ }{
+ {
+ map[string]interface{}{"root": map[interface{}]interface{}{"a": 1, "b": 2}},
+ map[string]interface{}{"root": map[string]interface{}{"a": 1, "b": 2}},
+ },
+ {
+ map[string]interface{}{"root": map[interface{}]interface{}{"a": []interface{}{1, map[interface{}]interface{}{"b": 2}}}},
+ map[string]interface{}{"root": map[string]interface{}{"a": []interface{}{1, map[string]interface{}{"b": 2}}}},
+ },
+ {
+ map[string]interface{}{"root": map[interface{}]interface{}{true: 1, "b": false}},
+ map[string]interface{}{"root": map[string]interface{}{"true": 1, "b": false}},
+ },
+ {
+ map[string]interface{}{"root": map[interface{}]interface{}{1: "a", 2: "b"}},
+ map[string]interface{}{"root": map[string]interface{}{"1": "a", "2": "b"}},
+ },
+ {
+ map[string]interface{}{"root": map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": 1}}},
+ map[string]interface{}{"root": map[string]interface{}{"a": map[string]interface{}{"b": 1}}},
+ },
+ {
+ map[string]interface{}{"a": map[string]interface{}{"b": 1}},
+ map[string]interface{}{"a": map[string]interface{}{"b": 1}},
+ },
+ {
+ map[string]interface{}{"root": []interface{}{map[interface{}]interface{}{1: "a", 2: "b"}}},
+ map[string]interface{}{"root": []interface{}{map[string]interface{}{"1": "a", "2": "b"}}},
+ },
+ }
+
+ for _, c := range cases {
+ err := stringifyYAMLMapMapKeys(c.input)
+ require.NoError(t, err)
+ assert.EqualValues(t, c.want, c.input)
+ }
+}
+
+func TestCUE(t *testing.T) {
+ in := `package foo
+import "regexp"
+matches: regexp.FindSubmatch(#"^([^:]*):(\d+)$"#, "localhost:443")
+one: 1
+two: 2
+// A field using quotes.
+"two-and-a-half": 2.5
+list: [ 1, 2, 3 ]
+`
+
+ expected := map[string]interface{}{
+ "matches": []interface{}{
+ "localhost:443",
+ "localhost",
+ "443",
+ },
+ "one": 1,
+ "two": 2,
+ "two-and-a-half": 2.5,
+ "list": []interface{}{1, 2, 3},
+ }
+
+ out, err := CUE(in)
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, out)
+
+ out, err = CUE(`[1,2,3]`)
+ require.NoError(t, err)
+ assert.EqualValues(t, []interface{}{1, 2, 3}, out)
+
+ out, err = CUE(`"hello world"`)
+ require.NoError(t, err)
+ assert.EqualValues(t, "hello world", out)
+
+ out, err = CUE(`true`)
+ require.NoError(t, err)
+ assert.EqualValues(t, true, out)
+
+ out, err = CUE(`'\x00\x01\x02\x03\x04'`)
+ require.NoError(t, err)
+ assert.EqualValues(t, []byte{0, 1, 2, 3, 4}, out)
+
+ out, err = CUE(`42`)
+ require.NoError(t, err)
+ assert.EqualValues(t, 42, out)
+
+ out, err = CUE(`42.0`)
+ require.NoError(t, err)
+ assert.EqualValues(t, 42.0, out)
+
+ out, err = CUE(`null`)
+ require.NoError(t, err)
+ assert.EqualValues(t, nil, out)
+
+ _, err = CUE(`>=0 & <=7 & >=3 & <=10`)
+ require.Error(t, err)
+}
+
+func TestToCUE(t *testing.T) {
+ in := map[string]interface{}{
+ "matches": []interface{}{
+ "localhost:443",
+ "localhost",
+ "443",
+ },
+ "one": 1,
+ "two": 2,
+ "two-and-a-half": 2.5,
+ "list": []interface{}{1, 2, 3},
+ }
+
+ expected := `{
+ "two-and-a-half": 2.5
+ list: [1, 2, 3]
+ two: 2
+ one: 1
+ matches: ["localhost:443", "localhost", "443"]
+}`
+
+ out, err := ToCUE(in)
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, out)
+
+ out, err = ToCUE([]interface{}{1, 2, 3})
+ require.NoError(t, err)
+ assert.EqualValues(t, `[1, 2, 3]`, out)
+
+ out, err = ToCUE("hello world")
+ require.NoError(t, err)
+ assert.EqualValues(t, `"hello world"`, out)
+
+ out, err = ToCUE(true)
+ require.NoError(t, err)
+ assert.EqualValues(t, `true`, out)
+
+ out, err = ToCUE([]byte{0, 1, 2, 3, 4})
+ require.NoError(t, err)
+ assert.EqualValues(t, `'\x00\x01\x02\x03\x04'`, out)
+
+ out, err = ToCUE(42)
+ require.NoError(t, err)
+ assert.EqualValues(t, `42`, out)
+
+ out, err = ToCUE(42.0)
+ require.NoError(t, err)
+ assert.EqualValues(t, `42.0`, out)
+
+ out, err = ToCUE(nil)
+ require.NoError(t, err)
+ assert.EqualValues(t, `null`, out)
+
+ out, err = ToCUE(struct{}{})
+ require.NoError(t, err)
+ assert.EqualValues(t, `{}`, out)
+}
diff --git a/internal/parsers/parser.go b/internal/parsers/parser.go
new file mode 100644
index 00000000..b01972d8
--- /dev/null
+++ b/internal/parsers/parser.go
@@ -0,0 +1,39 @@
+package parsers
+
+import (
+ "fmt"
+
+ "github.com/hairyhenderson/gomplate/v4/internal/iohelpers"
+)
+
+func ParseData(mimeType, s string) (out any, err error) {
+ switch iohelpers.MimeAlias(mimeType) {
+ case iohelpers.JSONMimetype:
+ out, err = JSON(s)
+ if err != nil {
+ // maybe it's a JSON array
+ out, err = JSONArray(s)
+ }
+ case iohelpers.JSONArrayMimetype:
+ out, err = JSONArray(s)
+ case iohelpers.YAMLMimetype:
+ out, err = YAML(s)
+ if err != nil {
+ // maybe it's a YAML array
+ out, err = YAMLArray(s)
+ }
+ case iohelpers.CSVMimetype:
+ out, err = CSV(s)
+ case iohelpers.TOMLMimetype:
+ out, err = TOML(s)
+ case iohelpers.EnvMimetype:
+ out, err = DotEnv(s)
+ case iohelpers.TextMimetype:
+ out = s
+ case iohelpers.CUEMimetype:
+ out, err = CUE(s)
+ default:
+ return nil, fmt.Errorf("data of type %q not yet supported", mimeType)
+ }
+ return out, err
+}
diff --git a/internal/tests/integration/basic_test.go b/internal/tests/integration/basic_test.go
index 49a3b168..f8aec7cf 100644
--- a/internal/tests/integration/basic_test.go
+++ b/internal/tests/integration/basic_test.go
@@ -6,6 +6,7 @@ import (
"testing"
"github.com/hairyhenderson/gomplate/v4/internal/iohelpers"
+ "github.com/stretchr/testify/require"
"gotest.tools/v3/assert"
"gotest.tools/v3/assert/cmp"
tfs "gotest.tools/v3/fs"
@@ -29,7 +30,7 @@ func setupBasicTest(t *testing.T) *tfs.Dir {
func TestBasic_ReportsVersion(t *testing.T) {
o, e, err := cmd(t, "-v").run()
- assert.NilError(t, err)
+ require.NoError(t, err)
assert.Equal(t, "", e)
assert.Assert(t, cmp.Contains(o, "gomplate version "))
}
@@ -88,11 +89,11 @@ func TestBasic_RoutesInputsToProperOutputs(t *testing.T) {
}
for _, v := range testdata {
info, err := os.Stat(v.path)
- assert.NilError(t, err)
+ require.NoError(t, err)
m := iohelpers.NormalizeFileMode(v.mode)
assert.Equal(t, m, info.Mode(), v.path)
content, err := os.ReadFile(v.path)
- assert.NilError(t, err)
+ require.NoError(t, err)
assert.Equal(t, v.content, string(content))
}
}
@@ -209,10 +210,10 @@ func TestBasic_RoutesInputsToProperOutputsWithChmod(t *testing.T) {
}
for _, v := range testdata {
info, err := os.Stat(v.path)
- assert.NilError(t, err)
+ require.NoError(t, err)
assert.Equal(t, iohelpers.NormalizeFileMode(v.mode), info.Mode())
content, err := os.ReadFile(v.path)
- assert.NilError(t, err)
+ require.NoError(t, err)
assert.Equal(t, v.content, string(content))
}
}
@@ -237,10 +238,10 @@ func TestBasic_OverridesOutputModeWithChmod(t *testing.T) {
}
for _, v := range testdata {
info, err := os.Stat(v.path)
- assert.NilError(t, err)
+ require.NoError(t, err)
assert.Equal(t, iohelpers.NormalizeFileMode(v.mode), info.Mode())
content, err := os.ReadFile(v.path)
- assert.NilError(t, err)
+ require.NoError(t, err)
assert.Equal(t, v.content, string(content))
}
}
@@ -254,13 +255,13 @@ func TestBasic_AppliesChmodBeforeWrite(t *testing.T) {
"-f", tmpDir.Join("one"),
"-o", out,
"--chmod", "0644").run()
- assert.NilError(t, err)
+ require.NoError(t, err)
info, err := os.Stat(out)
- assert.NilError(t, err)
+ require.NoError(t, err)
assert.Equal(t, iohelpers.NormalizeFileMode(0o644), info.Mode())
content, err := os.ReadFile(out)
- assert.NilError(t, err)
+ require.NoError(t, err)
assert.Equal(t, "hi\n", string(content))
}
@@ -271,10 +272,10 @@ func TestBasic_CreatesMissingDirectory(t *testing.T) {
assertSuccess(t, o, e, err, "")
info, err := os.Stat(out)
- assert.NilError(t, err)
+ require.NoError(t, err)
assert.Equal(t, iohelpers.NormalizeFileMode(0o640), info.Mode())
content, err := os.ReadFile(out)
- assert.NilError(t, err)
+ require.NoError(t, err)
assert.Equal(t, "hi\n", string(content))
out = tmpDir.Join("outdir")
@@ -285,7 +286,7 @@ func TestBasic_CreatesMissingDirectory(t *testing.T) {
assertSuccess(t, o, e, err, "")
info, err = os.Stat(out)
- assert.NilError(t, err)
+ require.NoError(t, err)
assert.Equal(t, iohelpers.NormalizeFileMode(0o755|fs.ModeDir), info.Mode())
assert.Equal(t, true, info.IsDir())
diff --git a/internal/tests/integration/datasources_consul_test.go b/internal/tests/integration/datasources_consul_test.go
index cbf154d6..f04d5020 100644
--- a/internal/tests/integration/datasources_consul_test.go
+++ b/internal/tests/integration/datasources_consul_test.go
@@ -139,23 +139,28 @@ func TestDatasources_Consul_ListKeys(t *testing.T) {
consulPut(t, consulAddr, "list-of-keys/foo2", "bar2")
// Get a list of keys using the ds args
- expectedResult := `[{"key":"foo1","value":"{\"bar1\": \"bar1\"}"},{"key":"foo2","value":"bar2"}]`
+ // expectedResult := `[{"key":"foo1","value":"{\"bar1\": \"bar1\"}"},{"key":"foo2","value":"bar2"}]`
+ expectedResult := `["foo1","foo2"]`
o, e, err := cmd(t, "-d", "consul=consul://",
"-i", `{{(ds "consul" "list-of-keys/") | data.ToJSON }}`).
withEnv("CONSUL_HTTP_ADDR", "http://"+consulAddr).run()
assertSuccess(t, o, e, err, expectedResult)
// Get a list of keys using the ds uri
- expectedResult = `[{"key":"foo1","value":"{\"bar1\": \"bar1\"}"},{"key":"foo2","value":"bar2"}]`
+ // expectedResult = `[{"key":"foo1","value":"{\"bar1\": \"bar1\"}"},{"key":"foo2","value":"bar2"}]`
+ expectedResult = `["foo1","foo2"]`
o, e, err = cmd(t, "-d", "consul=consul+http://"+consulAddr+"/list-of-keys/",
"-i", `{{(ds "consul" ) | data.ToJSON }}`).run()
assertSuccess(t, o, e, err, expectedResult)
- // Get a specific value from the list of Consul keys
- expectedResult = `{"bar1": "bar1"}`
- o, e, err = cmd(t, "-d", "consul=consul+http://"+consulAddr+"/list-of-keys/",
- "-i", `{{ $data := ds "consul" }}{{ (index $data 0).value }}`).run()
- assertSuccess(t, o, e, err, expectedResult)
+ // TODO: this doesn't work anymore because consulfs returns a directory
+ // listing now.
+ //
+ // // Get a specific value from the list of Consul keys
+ // expectedResult = `{"bar1": "bar1"}`
+ // o, e, err = cmd(t, "-d", "consul=consul+http://"+consulAddr+"/list-of-keys/",
+ // "-i", `{{ $data := ds "consul" }}{{ (index $data 0).value }}`).run()
+ // assertSuccess(t, o, e, err, expectedResult)
}
func TestDatasources_Consul_WithVaultAuth(t *testing.T) {
diff --git a/internal/tests/integration/datasources_file_test.go b/internal/tests/integration/datasources_file_test.go
index 9af8503b..37e65a0f 100644
--- a/internal/tests/integration/datasources_file_test.go
+++ b/internal/tests/integration/datasources_file_test.go
@@ -1,6 +1,7 @@
package integration
import (
+ "path/filepath"
"testing"
"gotest.tools/v3/fs"
@@ -83,7 +84,7 @@ func TestDatasources_File(t *testing.T) {
"-i", `{{(datasource "config").foo.bar}}`).run()
assertSuccess(t, o, e, err, "baz")
- o, e, err = cmd(t, "-d", "dir="+tmpDir.Path(),
+ o, e, err = cmd(t, "-d", "dir="+tmpDir.Path()+string(filepath.Separator),
"-i", `{{ (datasource "dir" "config.json").foo.bar }}`).run()
assertSuccess(t, o, e, err, "baz")
diff --git a/internal/tests/integration/datasources_git_test.go b/internal/tests/integration/datasources_git_test.go
index fbc8e85d..cb8c78a9 100644
--- a/internal/tests/integration/datasources_git_test.go
+++ b/internal/tests/integration/datasources_git_test.go
@@ -15,6 +15,8 @@ import (
)
func setupDatasourcesGitTest(t *testing.T) *fs.Dir {
+ t.Helper()
+
tmpDir := fs.NewDir(t, "gomplate-inttests",
fs.WithDir("repo",
fs.WithFiles(map[string]string{
@@ -44,6 +46,8 @@ func setupDatasourcesGitTest(t *testing.T) *fs.Dir {
}
func startGitDaemon(t *testing.T) string {
+ t.Helper()
+
tmpDir := setupDatasourcesGitTest(t)
pidDir := fs.NewDir(t, "gomplate-inttests-pid")
diff --git a/internal/tests/integration/datasources_vault_ec2_test.go b/internal/tests/integration/datasources_vault_ec2_test.go
index fb34f0eb..9e4a5192 100644
--- a/internal/tests/integration/datasources_vault_ec2_test.go
+++ b/internal/tests/integration/datasources_vault_ec2_test.go
@@ -35,6 +35,10 @@ func setupDatasourcesVaultEc2Test(t *testing.T) (*fs.Dir, *vaultClient, *httptes
w.Write([]byte("testtoken"))
}))
+ mux.HandleFunc("/latest/meta-data/instance-id", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ t.Logf("IMDS request: %s %s", r.Method, r.URL)
+ w.Write([]byte("i-00000000"))
+ }))
mux.HandleFunc("/sts/", stsHandler)
mux.HandleFunc("/ec2/", ec2Handler)
mux.HandleFunc("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -74,6 +78,7 @@ func TestDatasources_VaultEc2(t *testing.T) {
"endpoint": srv.URL + "/ec2",
"iam_endpoint": srv.URL + "/iam",
"sts_endpoint": srv.URL + "/sts",
+ "sts_region": "us-east-1",
})
require.NoError(t, err)
@@ -88,11 +93,11 @@ func TestDatasources_VaultEc2(t *testing.T) {
})
require.NoError(t, err)
- o, e, err := cmd(t, "-d", "vault=vault:///secret",
+ o, e, err := cmd(t, "-d", "vault=vault:///secret/",
"-i", `{{(ds "vault" "foo").value}}`).
withEnv("HOME", tmpDir.Join("home")).
withEnv("VAULT_ADDR", "http://"+v.addr).
- withEnv("AWS_META_ENDPOINT", srv.URL).
+ withEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT", srv.URL).
run()
assertSuccess(t, o, e, err, "bar")
}
diff --git a/internal/tests/integration/datasources_vault_test.go b/internal/tests/integration/datasources_vault_test.go
index 39ccb549..448fa759 100644
--- a/internal/tests/integration/datasources_vault_test.go
+++ b/internal/tests/integration/datasources_vault_test.go
@@ -20,6 +20,8 @@ import (
const vaultRootToken = "00000000-1111-2222-3333-444455556666"
func setupDatasourcesVaultTest(t *testing.T) *vaultClient {
+ t.Helper()
+
_, vaultClient := startVault(t)
err := vaultClient.vc.Sys().PutPolicy("writepol", `path "*" {
@@ -30,7 +32,7 @@ func setupDatasourcesVaultTest(t *testing.T) *vaultClient {
capabilities = ["read","delete"]
}`)
require.NoError(t, err)
- err = vaultClient.vc.Sys().PutPolicy("listPol", `path "*" {
+ err = vaultClient.vc.Sys().PutPolicy("listpol", `path "*" {
capabilities = ["read","list","delete"]
}`)
require.NoError(t, err)
@@ -39,6 +41,8 @@ func setupDatasourcesVaultTest(t *testing.T) *vaultClient {
}
func startVault(t *testing.T) (*fs.Dir, *vaultClient) {
+ t.Helper()
+
pidDir := fs.NewDir(t, "gomplate-inttests-vaultpid")
t.Cleanup(pidDir.Remove)
@@ -85,6 +89,8 @@ func startVault(t *testing.T) (*fs.Dir, *vaultClient) {
result.Assert(t, icmd.Expected{ExitCode: 0})
+ t.Log(result.Combined())
+
// restore old token if it was backed up
u, _ := user.Current()
homeDir := u.HomeDir
@@ -106,30 +112,32 @@ func TestDatasources_Vault_TokenAuth(t *testing.T) {
tok, err := v.tokenCreate("readpol", 5)
require.NoError(t, err)
- o, e, err := cmd(t, "-d", "vault=vault:///secret",
+ o, e, err := cmd(t, "-d", "vault=vault:///secret/",
"-i", `{{(ds "vault" "foo").value}}`).
withEnv("VAULT_ADDR", "http://"+v.addr).
withEnv("VAULT_TOKEN", tok).
run()
assertSuccess(t, o, e, err, "bar")
- o, e, err = cmd(t, "-d", "vault=vault+http://"+v.addr+"/secret",
+ o, e, err = cmd(t, "-d", "vault=vault+http://"+v.addr+"/secret/",
"-i", `{{(ds "vault" "foo").value}}`).
withEnv("VAULT_TOKEN", tok).
run()
assertSuccess(t, o, e, err, "bar")
- _, _, err = cmd(t, "-d", "vault=vault:///secret",
+ _, _, err = cmd(t, "-d", "vault=vault:///secret/",
"-i", `{{(ds "vault" "bar").value}}`).
withEnv("VAULT_ADDR", "http://"+v.addr).
withEnv("VAULT_TOKEN", tok).
run()
- assert.ErrorContains(t, err, "error calling ds: couldn't read datasource 'vault': no value found for path /secret/bar")
+ assert.ErrorContains(t, err, "error calling ds: couldn't read datasource 'vault':")
+ assert.ErrorContains(t, err, "stat secret/bar")
+ assert.ErrorContains(t, err, "file does not exist")
tokFile := fs.NewFile(t, "test-vault-token", fs.WithContent(tok))
defer tokFile.Remove()
- o, e, err = cmd(t, "-d", "vault=vault:///secret",
+ o, e, err = cmd(t, "-d", "vault=vault:///secret/",
"-i", `{{(ds "vault" "foo").value}}`).
withEnv("VAULT_ADDR", "http://"+v.addr).
withEnv("VAULT_TOKEN_FILE", tokFile.Path()).
@@ -157,7 +165,7 @@ func TestDatasources_Vault_UserPassAuth(t *testing.T) {
})
require.NoError(t, err)
- o, e, err := cmd(t, "-d", "vault=vault:///secret",
+ o, e, err := cmd(t, "-d", "vault=vault:///secret/",
"-i", `{{(ds "vault" "foo").value}}`).
withEnv("VAULT_ADDR", "http://"+v.addr).
withEnv("VAULT_AUTH_USERNAME", "dave").
@@ -170,7 +178,7 @@ func TestDatasources_Vault_UserPassAuth(t *testing.T) {
defer userFile.Remove()
defer passFile.Remove()
o, e, err = cmd(t,
- "-d", "vault=vault:///secret",
+ "-d", "vault=vault:///secret/",
"-i", `{{(ds "vault" "foo").value}}`).
withEnv("VAULT_ADDR", "http://"+v.addr).
withEnv("VAULT_AUTH_USERNAME_FILE", userFile.Path()).
@@ -179,7 +187,7 @@ func TestDatasources_Vault_UserPassAuth(t *testing.T) {
assertSuccess(t, o, e, err, "bar")
o, e, err = cmd(t,
- "-d", "vault=vault:///secret",
+ "-d", "vault=vault:///secret/",
"-i", `{{(ds "vault" "foo").value}}`).
withEnv("VAULT_ADDR", "http://"+v.addr).
withEnv("VAULT_AUTH_USERNAME", "dave").
@@ -216,7 +224,7 @@ func TestDatasources_Vault_AppRoleAuth(t *testing.T) {
sid, _ := v.vc.Logical().Write("auth/approle/role/testrole/secret-id", nil)
secretID := sid.Data["secret_id"].(string)
o, e, err := cmd(t,
- "-d", "vault=vault:///secret",
+ "-d", "vault=vault:///secret/",
"-i", `{{(ds "vault" "foo").value}}`).
withEnv("VAULT_ADDR", "http://"+v.addr).
withEnv("VAULT_ROLE_ID", roleID).
@@ -229,7 +237,7 @@ func TestDatasources_Vault_AppRoleAuth(t *testing.T) {
sid, _ = v.vc.Logical().Write("auth/approle2/role/testrole/secret-id", nil)
secretID = sid.Data["secret_id"].(string)
o, e, err = cmd(t,
- "-d", "vault=vault:///secret",
+ "-d", "vault=vault:///secret/",
"-i", `{{(ds "vault" "foo").value}}`).
withEnv("VAULT_ADDR", "http://"+v.addr).
withEnv("VAULT_ROLE_ID", roleID).
@@ -258,7 +266,8 @@ func TestDatasources_Vault_DynamicAuth(t *testing.T) {
{"vault=vault:///ssh/creds/test?ip=10.1.2.3&username=user", `{{(ds "vault").ip}}`},
{"vault=vault:///?ip=10.1.2.3&username=user", `{{(ds "vault" "ssh/creds/test").ip}}`},
}
- tok, err := v.tokenCreate("writepol", len(testCommands)*2)
+
+ tok, err := v.tokenCreate("writepol", len(testCommands)*4)
require.NoError(t, err)
for _, tc := range testCommands {
@@ -277,7 +286,7 @@ func TestDatasources_Vault_List(t *testing.T) {
v.vc.Logical().Write("secret/dir/bar", map[string]interface{}{"value": "two"})
defer v.vc.Logical().Delete("secret/dir/foo")
defer v.vc.Logical().Delete("secret/dir/bar")
- tok, err := v.tokenCreate("listpol", 5)
+ tok, err := v.tokenCreate("listpol", 15)
require.NoError(t, err)
o, e, err := cmd(t,
@@ -289,7 +298,7 @@ func TestDatasources_Vault_List(t *testing.T) {
assertSuccess(t, o, e, err, "bar: two foo: one ")
o, e, err = cmd(t,
- "-d", "vault=vault+http://"+v.addr+"/secret",
+ "-d", "vault=vault+http://"+v.addr+"/secret/",
"-i", `{{ range (ds "vault" "dir/" ) }}{{ . }} {{end}}`).
withEnv("VAULT_TOKEN", tok).
run()
diff --git a/internal/tests/integration/integration_test.go b/internal/tests/integration/integration_test.go
index a57d8d0b..e80ed350 100644
--- a/internal/tests/integration/integration_test.go
+++ b/internal/tests/integration/integration_test.go
@@ -256,7 +256,9 @@ func (c *command) runInProcess() (o, e string, err error) {
stdin := strings.NewReader(c.stdin)
- ctx := context.Background()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
err = gcmd.Main(ctx, c.args, stdin, stdout, stderr)
return stdout.String(), stderr.String(), err
diff --git a/internal/urlhelpers/urlhelpers.go b/internal/urlhelpers/urlhelpers.go
new file mode 100644
index 00000000..d3de4ce8
--- /dev/null
+++ b/internal/urlhelpers/urlhelpers.go
@@ -0,0 +1,46 @@
+package urlhelpers
+
+import (
+ "net/url"
+ "path"
+ "path/filepath"
+)
+
+// ParseSourceURL parses a datasource URL value, which may be '-' (for stdin://),
+// or it may be a Windows path (with driver letter and back-slash separators) or
+// UNC, or it may be relative. It also might just be a regular absolute URL...
+// In all cases it returns a correct URL for the value. It may be a relative URL
+// in which case the scheme should be assumed to be 'file'
+func ParseSourceURL(value string) (*url.URL, error) {
+ if value == "-" {
+ value = "stdin://"
+ }
+ value = filepath.ToSlash(value)
+ // handle absolute Windows paths
+ volName := ""
+ if volName = filepath.VolumeName(value); volName != "" {
+ // handle UNCs
+ if len(volName) > 2 {
+ value = "file:" + value
+ } else {
+ value = "file:///" + value
+ }
+ }
+ srcURL, err := url.Parse(value)
+ if err != nil {
+ return nil, err
+ }
+
+ if volName != "" && len(srcURL.Path) >= 3 {
+ if srcURL.Path[0] == '/' && srcURL.Path[2] == ':' {
+ srcURL.Path = srcURL.Path[1:]
+ }
+ }
+
+ // if it's an absolute path with no scheme, assume it's a file
+ if srcURL.Scheme == "" && path.IsAbs(srcURL.Path) {
+ srcURL.Scheme = "file"
+ }
+
+ return srcURL, nil
+}
diff --git a/internal/urlhelpers/urlhelpers_test.go b/internal/urlhelpers/urlhelpers_test.go
new file mode 100644
index 00000000..9a0df386
--- /dev/null
+++ b/internal/urlhelpers/urlhelpers_test.go
@@ -0,0 +1,42 @@
+package urlhelpers
+
+import (
+ "net/url"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseSourceURL(t *testing.T) {
+ expected := &url.URL{
+ Scheme: "http",
+ Host: "example.com",
+ Path: "/foo.json",
+ RawQuery: "bar",
+ }
+ u, err := ParseSourceURL("http://example.com/foo.json?bar")
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, u)
+
+ expected = &url.URL{Scheme: "", Path: ""}
+ u, err = ParseSourceURL("")
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, u)
+
+ expected = &url.URL{Scheme: "stdin"}
+ u, err = ParseSourceURL("-")
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, u)
+
+	// behaviour change in v4 - return relative if it's relative
+ expected = &url.URL{Path: "./foo/bar.json"}
+ u, err = ParseSourceURL("./foo/bar.json")
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, u)
+
+ expected = &url.URL{Scheme: "file", Path: "/absolute/bar.json"}
+ u, err = ParseSourceURL("/absolute/bar.json")
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, u)
+}