chore: disable sql connection when graphql is disabled (#405)
Signed-off-by: Mikhail Swift <[email protected]>
mikhailswift authored Oct 25, 2024
1 parent 08fae4a commit 9b9b4e9
Showing 6 changed files with 101 additions and 94 deletions.
62 changes: 32 additions & 30 deletions README.md
@@ -88,36 +88,38 @@ file.

Archivista is currently configured through environment variables.

| Variable | Default Value | Description |
| ------------------------------------------ | ----------------------------------------- | --------------------------------------------------------------------------------------------- |
| ARCHIVISTA_LISTEN_ON | tcp://127.0.0.1:8082 | URL endpoint for Archivista to listen on |
| ARCHIVISTA_READ_TIMEOUT | 120 | HTTP server read timeout |
| ARCHIVISTA_WRITE_TIMEOUT | 120 | HTTP server write timeout |
| ARCHIVISTA_LOG_LEVEL | INFO | Log level. Options are DEBUG, INFO, WARN, ERROR |
| ARCHIVISTA_CORS_ALLOW_ORIGINS | | Comma separated list of origins to allow CORS requests from |
| ARCHIVISTA_SQL_STORE_BACKEND | | Backend to use for SQL. Options are MYSQL or PSQL |
| ARCHIVISTA_SQL_STORE_CONNECTION_STRING | postgresql://root:example@tcp(db)/testify | SQL store connection string |
| ARCHIVISTA_STORAGE_BACKEND | | Backend to use for attestation storage. Options are FILE, BLOB, or empty string for disabled. |
| ARCHIVISTA_FILE_SERVE_ON | | What address to serve files on. Only valid when using FILE storage backend (e.g. `:8081`). |
| ARCHIVISTA_FILE_DIR | /tmp/archivista/ | Directory to store and serve files. Only valid when using FILE storage backend. |
| ARCHIVISTA_BLOB_STORE_ENDPOINT | 127.0.0.1:9000 | URL endpoint for blob storage. Only valid when using BLOB storage backend. |
| ARCHIVISTA_BLOB_STORE_CREDENTIAL_TYPE | | Blob store credential type. Options are IAM or ACCESS_KEY. |
| ARCHIVISTA_BLOB_STORE_ACCESS_KEY_ID | | Blob store access key id. Only valid when using BLOB storage backend. |
| ARCHIVISTA_BLOB_STORE_SECRET_ACCESS_KEY_ID | | Blob store secret access key id. Only valid when using BLOB storage backend. |
| ARCHIVISTA_BLOB_STORE_USE_TLS | TRUE | Use TLS for BLOB storage backend. Only valid when using BLOB storage backend. |
| ARCHIVISTA_BLOB_STORE_BUCKET_NAME | | Bucket to use for storage. Only valid when using BLOB storage backend. |
| ARCHIVISTA_ENABLE_GRAPHQL | TRUE | Enable GraphQL Endpoint |
| ARCHIVISTA_GRAPHQL_WEB_CLIENT_ENABLE | TRUE | Enable GraphiQL, the GraphQL web client |
| ARCHIVISTA_ENABLE_ARTIFACT_STORE | FALSE | Enable Artifact Store Endpoints |
| ARCHIVISTA_ARTIFACT_STORE_CONFIG | /tmp/artifacts/config.yaml | Location of the config describing available artifacts |
| ARCHIVISTA_PUBLISHER | "" | Publisher to use. Options are DAPR, RSTUF. Supports multiple, Comma-separated list of String |
| ARCHIVISTA_PUBLISHER_DAPR_HOST | localhost | Dapr host |
| ARCHIVISTA_PUBLISHER_DAPR_PORT | 3500 | Dapr port |
| ARCHIVISTA_PUBLISHER_DAPR_COMPONENT_NAME | "archivista" | Dapr pubsub component name |
| ARCHIVISTA_PUBLISHER_DAPR_TOPIC | "attestations" | Dapr pubsub topic |
| ARCHIVISTA_PUBLISHER_DAPR_URL | | Dapr full URL |
| ARCHIVISTA_PUBLISHER_RSTUF_HOST | | RSTUF URL |

**Note**: If `ARCHIVISTA_ENABLE_SQL_STORE` is set to false, no metadata about stored attestations will be collected; Archivista will only store and retrieve attestations by gitoid from its storage. Archivista servers with GraphQL or the SQL store disabled cannot be used to verify Witness policies (see the example after the configuration table below).

| Variable | Default Value | Description |
| ------------------------------------------ | ----------------------------------------- | ----------------------------------------------------------------------------------------------------------- |
| ARCHIVISTA_LISTEN_ON | tcp://127.0.0.1:8082 | URL endpoint for Archivista to listen on |
| ARCHIVISTA_READ_TIMEOUT | 120 | HTTP server read timeout |
| ARCHIVISTA_WRITE_TIMEOUT | 120 | HTTP server write timeout |
| ARCHIVISTA_LOG_LEVEL | INFO | Log level. Options are DEBUG, INFO, WARN, ERROR |
| ARCHIVISTA_CORS_ALLOW_ORIGINS | | Comma separated list of origins to allow CORS requests from |
| ARCHIVISTA_ENABLE_SQL_STORE | TRUE | Enable SQL Metadata store. If disabled, GraphQL will also be disabled |
| ARCHIVISTA_SQL_STORE_BACKEND | | Backend to use for SQL. Options are MYSQL or PSQL |
| ARCHIVISTA_SQL_STORE_CONNECTION_STRING | postgresql://root:example@tcp(db)/testify | SQL store connection string |
| ARCHIVISTA_STORAGE_BACKEND | | Backend to use for attestation storage. Options are FILE, BLOB, or empty string for disabled. |
| ARCHIVISTA_FILE_SERVE_ON | | What address to serve files on. Only valid when using FILE storage backend (e.g. `:8081`). |
| ARCHIVISTA_FILE_DIR | /tmp/archivista/ | Directory to store and serve files. Only valid when using FILE storage backend. |
| ARCHIVISTA_BLOB_STORE_ENDPOINT | 127.0.0.1:9000 | URL endpoint for blob storage. Only valid when using BLOB storage backend. |
| ARCHIVISTA_BLOB_STORE_CREDENTIAL_TYPE | | Blob store credential type. Options are IAM or ACCESS_KEY. |
| ARCHIVISTA_BLOB_STORE_ACCESS_KEY_ID | | Blob store access key id. Only valid when using BLOB storage backend. |
| ARCHIVISTA_BLOB_STORE_SECRET_ACCESS_KEY_ID | | Blob store secret access key id. Only valid when using BLOB storage backend. |
| ARCHIVISTA_BLOB_STORE_USE_TLS | TRUE | Use TLS for BLOB storage backend. Only valid when using BLOB storage backend. |
| ARCHIVISTA_BLOB_STORE_BUCKET_NAME | | Bucket to use for storage. Only valid when using BLOB storage backend. |
| ARCHIVISTA_ENABLE_GRAPHQL | TRUE | Enable GraphQL Endpoint. Archivista servers with GraphQL disabled cannot be used to verify Witness policies |
| ARCHIVISTA_GRAPHQL_WEB_CLIENT_ENABLE | TRUE | Enable GraphiQL, the GraphQL web client |
| ARCHIVISTA_ENABLE_ARTIFACT_STORE | FALSE | Enable Artifact Store Endpoints |
| ARCHIVISTA_ARTIFACT_STORE_CONFIG | /tmp/artifacts/config.yaml | Location of the config describing available artifacts |
| ARCHIVISTA_PUBLISHER                        | ""                                         | Publisher to use. Options are DAPR, RSTUF. Multiple publishers may be given as a comma-separated list        |
| ARCHIVISTA_PUBLISHER_DAPR_HOST | localhost | Dapr host |
| ARCHIVISTA_PUBLISHER_DAPR_PORT | 3500 | Dapr port |
| ARCHIVISTA_PUBLISHER_DAPR_COMPONENT_NAME | "archivista" | Dapr pubsub component name |
| ARCHIVISTA_PUBLISHER_DAPR_TOPIC | "attestations" | Dapr pubsub topic |
| ARCHIVISTA_PUBLISHER_DAPR_URL | | Dapr full URL |
| ARCHIVISTA_PUBLISHER_RSTUF_HOST | | RSTUF URL |
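
For a concrete picture of the new flag, here is a minimal sketch of starting Archivista without the SQL metadata store; the storage backend choice and the exact values below are illustrative assumptions, not part of this change:

```sh
# Sketch: run Archivista with the SQL metadata store (and therefore GraphQL) disabled.
# No migrations run and no attestation metadata is collected; attestations are still
# stored and retrieved by gitoid.
export ARCHIVISTA_ENABLE_SQL_STORE=FALSE
export ARCHIVISTA_STORAGE_BACKEND=FILE           # assumed backend for this example
export ARCHIVISTA_FILE_DIR=/tmp/archivista/
export ARCHIVISTA_LISTEN_ON=tcp://127.0.0.1:8082
/bin/archivista                                  # binary path as invoked by the container entrypoint
```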

## Using Archivista

52 changes: 29 additions & 23 deletions entrypoint.sh
@@ -13,34 +13,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.

if [[ -z $ARCHIVISTA_SQL_STORE_BACKEND ]]; then
SQL_TYPE="MYSQL"
ARCHIVISTA_ENABLE_SQL_STORE=$(echo ${ARCHIVISTA_ENABLE_SQL_STORE} | tr '[:lower:]' '[:upper:]')

if [ "${ARCHIVISTA_ENABLE_SQL_STORE}" = "FALSE" ]; then
echo "Skipping migrations"
else
if [[ -z $ARCHIVISTA_SQL_STORE_BACKEND ]]; then
SQL_TYPE="MYSQL"
else
SQL_TYPE=$(echo "$ARCHIVISTA_SQL_STORE_BACKEND" | tr '[:lower:]' '[:upper:]')
fi
case $SQL_TYPE in
MYSQL)
if [[ -z $ARCHIVISTA_SQL_STORE_CONNECTION_STRING ]]; then
ARCHIVISTA_SQL_STORE_CONNECTION_STRING="root:example@db/testify"
fi
echo "Running migrations for MySQL"
atlas migrate apply --dir "file:///archivista/migrations/mysql" --url "mysql://$ARCHIVISTA_SQL_STORE_CONNECTION_STRING"
atlas_rc=$?
;;
PSQL)
echo "Running migrations for Postgres"
atlas migrate apply --dir "file:///archivista/migrations/pgsql" --url "$ARCHIVISTA_SQL_STORE_CONNECTION_STRING"
atlas_rc=$?
;;
*)
echo "Unknown SQL backend: $ARCHIVISTA_SQL_STORE_BACKEND"
exit 1
;;
esac
fi
case $SQL_TYPE in
MYSQL)
if [[ -z $ARCHIVISTA_SQL_STORE_CONNECTION_STRING ]]; then
ARCHIVISTA_SQL_STORE_CONNECTION_STRING="root:example@db/testify"
fi
echo "Running migrations for MySQL"
atlas migrate apply --dir "file:///archivista/migrations/mysql" --url "mysql://$ARCHIVISTA_SQL_STORE_CONNECTION_STRING"
atlas_rc=$?
;;
PSQL)
echo "Running migrations for Postgres"
atlas migrate apply --dir "file:///archivista/migrations/pgsql" --url "$ARCHIVISTA_SQL_STORE_CONNECTION_STRING"
atlas_rc=$?
;;
*)
echo "Unknown SQL backend: $ARCHIVISTA_SQL_STORE_BACKEND"
exit 1
;;
esac

if [[ $atlas_rc -ne 0 ]]; then
if [[ $atlas_rc -ne 0 ]]; then
echo "Failed to apply migrations"
exit 1
fi
fi

/bin/archivista
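
Because the entrypoint upper-cases the flag before comparing it against `FALSE`, any casing of the value works. A rough sketch of the resulting behaviour; the connection string and invocation below are illustrative and not taken from this commit:

```sh
# Skips the Atlas migrations entirely and goes straight to /bin/archivista.
ARCHIVISTA_ENABLE_SQL_STORE=false ./entrypoint.sh

# Runs the Postgres migrations first (connection string shown is a placeholder).
ARCHIVISTA_ENABLE_SQL_STORE=true \
ARCHIVISTA_SQL_STORE_BACKEND=PSQL \
ARCHIVISTA_SQL_STORE_CONNECTION_STRING="postgres://root:example@db:5432/testify?sslmode=disable" \
./entrypoint.sh
```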
5 changes: 3 additions & 2 deletions pkg/config/config.go
@@ -35,6 +35,7 @@ type Config struct {
SPIFFEAddress string `default:"unix:///tmp/spire-agent/public/api.sock" desc:"SPIFFE server address" split_words:"true"`
SPIFFETrustedServerId string `default:"" desc:"Trusted SPIFFE server ID; defaults to any" split_words:"true"`

EnableSQLStore bool `default:"TRUE" desc:"*** Enable SQL Metadata store. If disabled, GraphQL will also be disabled ***" split_words:"true"`
SQLStoreConnectionString string `default:"root:example@tcp(db)/testify" desc:"SQL store connection string" split_words:"true"`
SQLStoreBackend string `default:"MYSQL" desc:"SQL backend to use. Options are MYSQL, PSQL" split_words:"true"`
SQLStoreMaxIdleConnections int `default:"10" desc:"Maximum number of connections in the idle connection pool" split_words:"true"`
@@ -51,7 +52,7 @@ type Config struct {
BlobStoreUseTLS bool `default:"TRUE" desc:"Use TLS for BLOB storage backend. Only valid when using BLOB storage backend." split_words:"true"`
BlobStoreBucketName string `default:"" desc:"Bucket to use for storage. Only valid when using BLOB storage backend." split_words:"true"`

EnableGraphql bool `default:"TRUE" desc:"*** Enable GraphQL Endpoint" split_words:"true"`
EnableGraphql bool `default:"TRUE" desc:"*** Enable GraphQL Endpoint. If GraphQL is disabled, Archivista will be unable to be used by Witness to verify policies" split_words:"true"`
GraphqlWebClientEnable bool `default:"TRUE" desc:"Enable GraphiQL, the GraphQL web client" split_words:"true"`

EnableArtifactStore bool `default:"FALSE" desc:"*** Enable Artifact Store Endpoints" split_words:"true"`
@@ -86,7 +87,7 @@ func (c *Config) Process() error {
}
}

//check if both are being used and error if so
// check if both are being used and error if so
if usingDeprecatedEnv && usingNewEnv {
err := errors.New("both deprecated and new environment variables are being used. Please use only the new environment variables")
return err
13 changes: 7 additions & 6 deletions pkg/server/server.go
@@ -110,14 +110,14 @@ func New(cfg *config.Config, opts ...Option) (Server, error) {
// TODO: remove from future version (v0.6.0) endpoint with version
r.HandleFunc("/download/{gitoid}", s.DownloadHandler)
r.HandleFunc("/upload", s.UploadHandler)
if cfg.EnableGraphql {
if cfg.EnableSQLStore && cfg.EnableGraphql {
r.Handle("/query", s.Query(s.sqlClient))
r.Handle("/v1/query", s.Query(s.sqlClient))
}

r.HandleFunc("/v1/download/{gitoid}", s.DownloadHandler)
r.HandleFunc("/v1/upload", s.UploadHandler)
if cfg.GraphqlWebClientEnable {
if cfg.EnableSQLStore && cfg.EnableGraphql && cfg.GraphqlWebClientEnable {
r.Handle("/",
playground.Handler("Archivista", "/v1/query"),
)
@@ -171,9 +171,11 @@ func (s *Server) Upload(ctx context.Context, r io.Reader) (api.UploadResponse, e
}
}

if err := s.metadataStore.Store(ctx, gid.String(), payload); err != nil {
logrus.Errorf("received error from metadata store: %+v", err)
return api.UploadResponse{}, err
if s.metadataStore != nil {
if err := s.metadataStore.Store(ctx, gid.String(), payload); err != nil {
logrus.Errorf("received error from metadata store: %+v", err)
return api.UploadResponse{}, err
}
}

if s.publisherStore != nil {
@@ -321,7 +323,6 @@ func (s *Server) AllArtifactsHandler(w http.ResponseWriter, r *http.Request) {
}

w.Header().Set("Content-Type", "application/json")

}

// @Summary List Artifact Versions
10 changes: 3 additions & 7 deletions pkg/server/server_test.go
@@ -117,6 +117,7 @@ func (ut *UTServerSuite) Test_New() {
cfg := new(config.Config)
cfg.EnableGraphql = true
cfg.GraphqlWebClientEnable = true
cfg.EnableSQLStore = true
var err error
ut.testServer, err = New(cfg, WithMetadataStore(ut.mockedStorer), WithObjectStore(ut.mockedStorerGetter))
ut.NoError(err)
@@ -133,7 +134,6 @@ func (ut *UTServerSuite) Test_New() {
allPaths = append(allPaths, pathTemplate)
return nil
})

if err != nil {
ut.FailNow(err.Error())
}
@@ -151,6 +151,7 @@ func (ut *UTServerSuite) Test_New_EnableGraphQL_False() {
cfg := new(config.Config)
cfg.EnableGraphql = false
cfg.GraphqlWebClientEnable = true
cfg.EnableSQLStore = true
var err error
ut.testServer, err = New(cfg, WithMetadataStore(ut.mockedStorer), WithObjectStore(ut.mockedStorerGetter))
ut.NoError(err)
@@ -167,7 +168,6 @@ func (ut *UTServerSuite) Test_New_EnableGraphQL_False() {
allPaths = append(allPaths, pathTemplate)
return nil
})

if err != nil {
ut.FailNow(err.Error())
}
@@ -177,14 +177,14 @@ func (ut *UTServerSuite) Test_New_EnableGraphQL_False() {
ut.Contains(allPaths, "/v1/download/{gitoid}")
ut.Contains(allPaths, "/v1/upload")
ut.NotContains(allPaths, "/v1/query")
ut.Contains(allPaths, "/")
ut.Contains(allPaths, "/swagger/")
}

func (ut *UTServerSuite) Test_New_GraphqlWebClientEnable_False() {
cfg := new(config.Config)
cfg.EnableGraphql = true
cfg.GraphqlWebClientEnable = false
cfg.EnableSQLStore = true
var err error
ut.testServer, err = New(cfg, WithMetadataStore(ut.mockedStorer), WithObjectStore(ut.mockedStorerGetter))
ut.NoError(err)
@@ -201,7 +201,6 @@ func (ut *UTServerSuite) Test_New_GraphqlWebClientEnable_False() {
allPaths = append(allPaths, pathTemplate)
return nil
})

if err != nil {
ut.FailNow(err.Error())
}
@@ -264,7 +263,6 @@ func (ut *UTServerSuite) Test_Upload_FailedMetadatStprage() {
}

func (ut *UTServerSuite) Test_UploadHandler() {

w := httptest.NewRecorder()
requestBody := []byte("fakePayload")
request := httptest.NewRequest(http.MethodPost, "/v1/upload", bytes.NewBuffer(requestBody))
Expand All @@ -277,7 +275,6 @@ func (ut *UTServerSuite) Test_UploadHandler() {
}

func (ut *UTServerSuite) Test_UploadHandler_WrongMethod() {

w := httptest.NewRecorder()
requestBody := []byte("fakePayload")
request := httptest.NewRequest(http.MethodGet, "/upload", bytes.NewBuffer(requestBody))
Expand All @@ -291,7 +288,6 @@ func (ut *UTServerSuite) Test_UploadHandler_WrongMethod() {
}

func (ut *UTServerSuite) Test_UploadHandler_FailureUpload() {

w := httptest.NewRecorder()
requestBody := []byte("fakePayload")
request := httptest.NewRequest(http.MethodPost, "/upload", bytes.NewBuffer(requestBody))