Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

refactor: use coder/slog + minor go style changes #107

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 8 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
90 changes: 43 additions & 47 deletions cmd/readmevalidation/coderresources.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,9 @@ package main

import (
"bufio"
"context"
"errors"
"fmt"
"log"
"net/url"
"os"
"path"
Expand All @@ -15,7 +15,14 @@ import (
"gopkg.in/yaml.v3"
)

var supportedResourceTypes = []string{"modules", "templates"}
var (
supportedResourceTypes = []string{"modules", "templates"}

// TODO: This is a holdover from the validation logic used by the Coder Modules repo. It gives us some assurance, but
// realistically, we probably want to parse any Terraform code snippets, and make some deeper guarantees about how it's
// structured. Just validating whether it *can* be parsed as Terraform would be a big improvement.
terraformVersionRe = regexp.MustCompile("^\\s*\\bversion\\s+=")
)

type coderResourceFrontmatter struct {
Description string `yaml:"description"`
Expand All @@ -25,9 +32,8 @@ type coderResourceFrontmatter struct {
Tags []string `yaml:"tags"`
}

// coderResourceReadme represents a README describing a Terraform resource used
// to help create Coder workspaces. As of 2025-04-15, this encapsulates both
// Coder Modules and Coder Templates
// coderResourceReadme represents a README describing a Terraform resource used to help create Coder workspaces.
// As of 2025-04-15, this encapsulates both Coder Modules and Coder Templates.
type coderResourceReadme struct {
resourceType string
filePath string
Expand All @@ -50,35 +56,33 @@ func validateCoderResourceDescription(description string) error {
}

func validateCoderResourceIconURL(iconURL string) []error {
problems := []error{}

if iconURL == "" {
problems = append(problems, errors.New("icon URL cannot be empty"))
return problems
return []error{errors.New("icon URL cannot be empty")}
}

errs := []error{}

isAbsoluteURL := !strings.HasPrefix(iconURL, ".") && !strings.HasPrefix(iconURL, "/")
if isAbsoluteURL {
if _, err := url.ParseRequestURI(iconURL); err != nil {
problems = append(problems, errors.New("absolute icon URL is not correctly formatted"))
errs = append(errs, errors.New("absolute icon URL is not correctly formatted"))
}
if strings.Contains(iconURL, "?") {
problems = append(problems, errors.New("icon URLs cannot contain query parameters"))
errs = append(errs, errors.New("icon URLs cannot contain query parameters"))
}
return problems
return errs
}

// Would normally be skittish about having relative paths like this, but it
// should be safe because we have guarantees about the structure of the
// repo, and where this logic will run
// Would normally be skittish about having relative paths like this, but it should be safe because we have guarantees
// about the structure of the repo, and where this logic will run.
isPermittedRelativeURL := strings.HasPrefix(iconURL, "./") ||
strings.HasPrefix(iconURL, "/") ||
strings.HasPrefix(iconURL, "../../../../.icons")
if !isPermittedRelativeURL {
problems = append(problems, fmt.Errorf("relative icon URL %q must either be scoped to that module's directory, or the top-level /.icons directory (this can usually be done by starting the path with \"../../../.icons\")", iconURL))
errs = append(errs, fmt.Errorf("relative icon URL %q must either be scoped to that module's directory, or the top-level /.icons directory (this can usually be done by starting the path with \"../../../.icons\")", iconURL))
}

return problems
return errs
}

func validateCoderResourceTags(tags []string) error {
Expand All @@ -89,9 +93,8 @@ func validateCoderResourceTags(tags []string) error {
return nil
}

// All of these tags are used for the module/template filter controls in the
// Registry site. Need to make sure they can all be placed in the browser
// URL without issue
// All of these tags are used for the module/template filter controls in the Registry site. Need to make sure they
// can all be placed in the browser URL without issue.
invalidTags := []string{}
for _, t := range tags {
if t != url.QueryEscape(t) {
Expand All @@ -105,16 +108,11 @@ func validateCoderResourceTags(tags []string) error {
return nil
}

// Todo: This is a holdover from the validation logic used by the Coder Modules
// repo. It gives us some assurance, but realistically, we probably want to
// parse any Terraform code snippets, and make some deeper guarantees about how
// it's structured. Just validating whether it *can* be parsed as Terraform
// would be a big improvement.
var terraformVersionRe = regexp.MustCompile("^\\s*\\bversion\\s+=")

func validateCoderResourceReadmeBody(body string) []error {
trimmed := strings.TrimSpace(body)
var errs []error

trimmed := strings.TrimSpace(body)
// TODO: this may cause unexpected behaviour since the errors slice may have a 0 length. Add a test.
errs = append(errs, validateReadmeBody(trimmed)...)

foundParagraph := false
Expand All @@ -124,15 +122,15 @@ func validateCoderResourceReadmeBody(body string) []error {
lineNum := 0
isInsideCodeBlock := false
isInsideTerraform := false
nextLine := ""

lineScanner := bufio.NewScanner(strings.NewReader(trimmed))
for lineScanner.Scan() {
lineNum++
nextLine := lineScanner.Text()
nextLine = lineScanner.Text()
Copy link
Member

@Parkreiner Parkreiner May 16, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not sure I understand the point of this change, since nextLine isn't ever used outside the loop. I'm not a fan of scope pollution, and try to keep scoping as aggressively small as possible, even in the same function

  1. Is this mostly a memory optimization?
  2. I feel like I see code all the time in Go that looks just like what we used to have. Especially with range loops. Does the below example have the same problems as the old approach, where we're declaring new block-scoped variables on the stack once per iteration?
for i, value := range exampleSlice {
  // Stuff
}

Is there an optimization that range loops have that doesn't exist with other loops?

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah this is just an optimization to reduce memory allocations. Very minor in this case since I doubt this loop has a lot of iterations, but without this a new string for nextLine is allocated for each iteration of the loop.

The Go compiler already performs this same optimization for for thing := range anotherThing loops, assigning to the same var for each iteration rather than allocating a new one every time.


// Code assumes that invalid headers would've already been handled by
// the base validation function, so we don't need to check deeper if the
// first line isn't an h1
// Code assumes that invalid headers would've already been handled by the base validation function, so we don't
// need to check deeper if the first line isn't an h1.
if lineNum == 1 {
if !strings.HasPrefix(nextLine, "# ") {
break
Expand All @@ -159,15 +157,13 @@ func validateCoderResourceReadmeBody(body string) []error {
continue
}

// Code assumes that we can treat this case as the end of the "h1
// section" and don't need to process any further lines
// Code assumes that we can treat this case as the end of the "h1 section" and don't need to process any further lines.
if lineNum > 1 && strings.HasPrefix(nextLine, "#") {
break
}

// Code assumes that if we've reached this point, the only other options
// are: (1) empty spaces, (2) paragraphs, (3) HTML, and (4) asset
// references made via [] syntax
// Code assumes that if we've reached this point, the only other options are:
// (1) empty spaces, (2) paragraphs, (3) HTML, and (4) asset references made via [] syntax.
trimmedLine := strings.TrimSpace(nextLine)
isParagraph := trimmedLine != "" && !strings.HasPrefix(trimmedLine, "![") && !strings.HasPrefix(trimmedLine, "<")
foundParagraph = foundParagraph || isParagraph
Expand Down Expand Up @@ -257,9 +253,9 @@ func parseCoderResourceReadmeFiles(resourceType string, rms []readme) (map[strin

yamlValidationErrors := []error{}
for _, readme := range resources {
errors := validateCoderResourceReadme(readme)
if len(errors) > 0 {
yamlValidationErrors = append(yamlValidationErrors, errors...)
errs := validateCoderResourceReadme(readme)
if len(errs) > 0 {
yamlValidationErrors = append(yamlValidationErrors, errs...)
}
}
if len(yamlValidationErrors) != 0 {
Expand All @@ -272,7 +268,7 @@ func parseCoderResourceReadmeFiles(resourceType string, rms []readme) (map[strin
return resources, nil
}

// Todo: Need to beef up this function by grabbing each image/video URL from
// TODO: Need to beef up this function by grabbing each image/video URL from
// the body's AST
func validateCoderResourceRelativeUrls(resources map[string]coderResourceReadme) error {
return nil
Expand All @@ -286,13 +282,14 @@ func aggregateCoderResourceReadmeFiles(resourceType string) ([]readme, error) {

var allReadmeFiles []readme
var errs []error
var resourceDirs []os.DirEntry
for _, rf := range registryFiles {
if !rf.IsDir() {
continue
}

resourceRootPath := path.Join(rootRegistryPath, rf.Name(), resourceType)
resourceDirs, err := os.ReadDir(resourceRootPath)
resourceDirs, err = os.ReadDir(resourceRootPath)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
errs = append(errs, err)
Expand Down Expand Up @@ -338,17 +335,16 @@ func validateAllCoderResourceFilesOfType(resourceType string) error {
return err
}

log.Printf("Processing %d README files\n", len(allReadmeFiles))
logger.Info(context.Background(), "Processing README files", "num_files", len(allReadmeFiles))
resources, err := parseCoderResourceReadmeFiles(resourceType, allReadmeFiles)
if err != nil {
return err
}
log.Printf("Processed %d README files as valid Coder resources with type %q", len(resources), resourceType)
logger.Info(context.Background(), "Processed README files as valid Coder resources", "num_files", len(resources), "type", resourceType)

err = validateCoderResourceRelativeUrls(resources)
if err != nil {
if err = validateCoderResourceRelativeUrls(resources); err != nil {
return err
}
log.Printf("All relative URLs for %s READMEs are valid\n", resourceType)
logger.Info(context.Background(), "All relative URLs for READMEs are valid", "type", resourceType)
return nil
}
53 changes: 22 additions & 31 deletions cmd/readmevalidation/contributors.go
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
package main

import (
"context"
"errors"
"fmt"
"log"
"net/url"
"os"
"path"
Expand Down Expand Up @@ -35,7 +35,7 @@ type contributorProfileReadme struct {

func validateContributorDisplayName(displayName string) error {
if displayName == "" {
return fmt.Errorf("missing display_name")
return errors.New("missing display_name")
}

return nil
Expand All @@ -53,17 +53,16 @@ func validateContributorLinkedinURL(linkedinURL *string) error {
return nil
}

// validateContributorSupportEmail does best effort validation of a contributors email address. We can't 100% validate
// that this is correct without actually sending an email, especially because some contributors are individual developers
// and we don't want to do that on every single run of the CI pipeline. The best we can do is verify the general structure.
func validateContributorSupportEmail(email *string) []error {
if email == nil {
return nil
}

errs := []error{}

// Can't 100% validate that this is correct without actually sending
// an email, and especially with some contributors being individual
// developers, we don't want to do that on every single run of the CI
// pipeline. Best we can do is verify the general structure
username, server, ok := strings.Cut(*email, "@")
if !ok {
errs = append(errs, fmt.Errorf("email address %q is missing @ symbol", *email))
Expand Down Expand Up @@ -113,21 +112,18 @@ func validateContributorStatus(status string) error {
return nil
}

// Can't validate the image actually leads to a valid resource in a pure
// function, but can at least catch obvious problems
// Can't validate the image actually leads to a valid resource in a pure function, but can at least catch obvious problems.
func validateContributorAvatarURL(avatarURL *string) []error {
if avatarURL == nil {
return nil
}

errs := []error{}
if *avatarURL == "" {
errs = append(errs, errors.New("avatar URL must be omitted or non-empty string"))
return errs
return []error{errors.New("avatar URL must be omitted or non-empty string")}
}

// Have to use .Parse instead of .ParseRequestURI because this is the
// one field that's allowed to be a relative URL
errs := []error{}
// Have to use .Parse instead of .ParseRequestURI because this is the one field that's allowed to be a relative URL.
if _, err := url.Parse(*avatarURL); err != nil {
errs = append(errs, fmt.Errorf("URL %q is not a valid relative or absolute URL", *avatarURL))
}
Expand Down Expand Up @@ -220,8 +216,7 @@ func parseContributorFiles(readmeEntries []readme) (map[string]contributorProfil

yamlValidationErrors := []error{}
for _, p := range profilesByNamespace {
errors := validateContributorReadme(p)
if len(errors) > 0 {
if errors := validateContributorReadme(p); len(errors) > 0 {
yamlValidationErrors = append(yamlValidationErrors, errors...)
continue
}
Expand All @@ -245,11 +240,12 @@ func aggregateContributorReadmeFiles() ([]readme, error) {
allReadmeFiles := []readme{}
errs := []error{}
for _, e := range dirEntries {
dirPath := path.Join(rootRegistryPath, e.Name())
if !e.IsDir() {
continue
}

dirPath := path.Join(rootRegistryPath, e.Name())

readmePath := path.Join(dirPath, "README.md")
rmBytes, err := os.ReadFile(readmePath)
if err != nil {
Expand All @@ -273,20 +269,17 @@ func aggregateContributorReadmeFiles() ([]readme, error) {
}

func validateContributorRelativeUrls(contributors map[string]contributorProfileReadme) error {
// This function only validates relative avatar URLs for now, but it can be
// beefed up to validate more in the future
// This function only validates relative avatar URLs for now, but it can be beefed up to validate more in the future.
errs := []error{}

for _, con := range contributors {
// If the avatar URL is missing, we'll just assume that the Registry
// site build step will take care of filling in the data properly
// If the avatar URL is missing, we'll just assume that the Registry site build step will take care of filling
// in the data properly.
if con.frontmatter.AvatarURL == nil {
continue
}

isRelativeURL := strings.HasPrefix(*con.frontmatter.AvatarURL, ".") ||
strings.HasPrefix(*con.frontmatter.AvatarURL, "/")
if !isRelativeURL {
if !strings.HasPrefix(*con.frontmatter.AvatarURL, ".") || !strings.HasPrefix(*con.frontmatter.AvatarURL, "/") {
continue
}

Expand All @@ -297,8 +290,7 @@ func validateContributorRelativeUrls(contributors map[string]contributorProfileR

absolutePath := strings.TrimSuffix(con.filePath, "README.md") +
*con.frontmatter.AvatarURL
_, err := os.ReadFile(absolutePath)
if err != nil {
if _, err := os.ReadFile(absolutePath); err != nil {
errs = append(errs, fmt.Errorf("%q: relative avatar path %q does not point to image in file system", con.filePath, *con.frontmatter.AvatarURL))
}
}
Expand All @@ -318,19 +310,18 @@ func validateAllContributorFiles() error {
return err
}

log.Printf("Processing %d README files\n", len(allReadmeFiles))
logger.Info(context.Background(), "Processing README files", "num_files", len(allReadmeFiles))
Copy link
Member

@Parkreiner Parkreiner May 16, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm still new to structured logging. Is there any special behavior/benefit you get if you use the same key multiple times? I guess I'm just wondering how much of a concern it is to make sure you're using the same keys each time you describe the same "resource", particularly for a function call that takes a variadic slice of empty interfaces (so basically zero type-safety)

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's not the end of the world if you don't use the same key, but it does make searching for logs in some kind of log aggregation system much easier.

For example, a system I used to work on referred to the same internal tenant type within the system as variations of user, tenant, id, etc. Remembering which key was used on which logged lines complicated searches when I knew I needed to see info for tenant="1234" but on some lines the logging was user="1234".

Again this is likely less important in the case of the registry but still a good practice.

contributors, err := parseContributorFiles(allReadmeFiles)
if err != nil {
return err
}
log.Printf("Processed %d README files as valid contributor profiles", len(contributors))
logger.Info(context.Background(), "Processed README files as valid contributor profiles", "num_contributors", len(contributors))

err = validateContributorRelativeUrls(contributors)
if err != nil {
if err = validateContributorRelativeUrls(contributors); err != nil {
return err
}
log.Println("All relative URLs for READMEs are valid")
logger.Info(context.Background(), "All relative URLs for READMEs are valid")

log.Printf("Processed all READMEs in the %q directory\n", rootRegistryPath)
logger.Info(context.Background(), "Processed all READMEs in directory", "dir", rootRegistryPath)
return nil
}
6 changes: 2 additions & 4 deletions cmd/readmevalidation/errors.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,8 @@ package main

import "fmt"

// validationPhaseError represents an error that occurred during a specific
// phase of README validation. It should be used to collect ALL validation
// errors that happened during a specific phase, rather than the first one
// encountered.
// validationPhaseError represents an error that occurred during a specific phase of README validation. It should be
// used to collect ALL validation errors that happened during a specific phase, rather than the first one encountered.
type validationPhaseError struct {
phase validationPhase
errors []error
Expand Down
Loading