Enable unparam linter (#31277)

Enable [unparam](https://github.com/mvdan/unparam) linter.

Often I could not tell whether a parameter is intentionally unused, so I put
`//nolint` on those cases, e.g. webhook request creation functions that never
use `ctx`.
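
A minimal sketch of that suppression pattern, using a hypothetical `newRequest` helper rather than code from this diff:

```go
package webhook

import (
	"bytes"
	"context"
	"net/http"
)

// newRequest mirrors the shape of the webhook constructors: ctx is kept so all
// constructors share the same signature, but this one never uses it, so the
// unparam warning is silenced inline.
func newRequest(ctx context.Context, url string, body []byte) (*http.Request, error) { //nolint:unparam
	return http.NewRequest(http.MethodPost, url, bytes.NewReader(body))
}
```

Where the parameter name does not matter, the diff instead renames it to `_`.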

---------

Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: delvh <dev.lh@web.de>
(cherry picked from commit fc2d75f86d77b022ece848acf2581c14ef21d43b)

Conflicts:
	modules/setting/config_env.go
	modules/storage/azureblob.go
	services/webhook/dingtalk.go
	services/webhook/discord.go
	services/webhook/feishu.go
	services/webhook/matrix.go
	services/webhook/msteams.go
	services/webhook/packagist.go
	services/webhook/slack.go
	services/webhook/telegram.go
	services/webhook/wechatwork.go

	run `make lint-go` and fix Forgejo-specific warnings
silverwind 2024-06-11 20:47:45 +02:00 committed by Earl Warren
parent 8346cd6c88
commit d8bc0495de
No known key found for this signature in database
GPG key ID: 0579CB2928A78A00
27 changed files with 88 additions and 123 deletions


@@ -22,6 +22,7 @@ linters:
     - typecheck
     - unconvert
     - unused
+    - unparam
     - wastedassign
 run:


@@ -215,16 +215,15 @@ func fileTimestampToTime(timestamp int64) time.Time {
     return time.UnixMicro(timestamp)
 }
 
-func (f *file) loadMetaByPath() (*dbfsMeta, error) {
+func (f *file) loadMetaByPath() error {
     var fileMeta dbfsMeta
     if ok, err := db.GetEngine(f.ctx).Where("full_path = ?", f.fullPath).Get(&fileMeta); err != nil {
-        return nil, err
+        return err
     } else if ok {
         f.metaID = fileMeta.ID
         f.blockSize = fileMeta.BlockSize
-        return &fileMeta, nil
     }
-    return nil, nil
+    return nil
 }
 
 func (f *file) open(flag int) (err error) {
@@ -288,10 +287,7 @@ func (f *file) createEmpty() error {
     if err != nil {
         return err
     }
-    if _, err = f.loadMetaByPath(); err != nil {
-        return err
-    }
-    return nil
+    return f.loadMetaByPath()
 }
 
 func (f *file) truncate() error {
@@ -368,8 +364,5 @@ func buildPath(path string) string {
 func newDbFile(ctx context.Context, path string) (*file, error) {
     path = buildPath(path)
     f := &file{ctx: ctx, fullPath: path, blockSize: defaultFileBlockSize}
-    if _, err := f.loadMetaByPath(); err != nil {
-        return nil, err
-    }
-    return f, nil
+    return f, f.loadMetaByPath()
 }


@@ -99,9 +99,9 @@ func applySorts(sess *xorm.Session, sortType string, priorityRepoID int64) {
     }
 }
 
-func applyLimit(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyLimit(sess *xorm.Session, opts *IssuesOptions) {
     if opts.Paginator == nil || opts.Paginator.IsListAll() {
-        return sess
+        return
     }
 
     start := 0
@@ -109,11 +109,9 @@ func applyLimit(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
         start = (opts.Paginator.Page - 1) * opts.Paginator.PageSize
     }
     sess.Limit(opts.Paginator.PageSize, start)
-    return sess
 }
 
-func applyLabelsCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyLabelsCondition(sess *xorm.Session, opts *IssuesOptions) {
     if len(opts.LabelIDs) > 0 {
         if opts.LabelIDs[0] == 0 {
             sess.Where("issue.id NOT IN (SELECT issue_id FROM issue_label)")
@@ -136,11 +134,9 @@ func applyLabelsCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session
     if len(opts.ExcludedLabelNames) > 0 {
         sess.And(builder.NotIn("issue.id", BuildLabelNamesIssueIDsCondition(opts.ExcludedLabelNames)))
     }
-    return sess
 }
 
-func applyMilestoneCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyMilestoneCondition(sess *xorm.Session, opts *IssuesOptions) {
     if len(opts.MilestoneIDs) == 1 && opts.MilestoneIDs[0] == db.NoConditionID {
         sess.And("issue.milestone_id = 0")
     } else if len(opts.MilestoneIDs) > 0 {
@@ -153,11 +149,9 @@ func applyMilestoneCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Sess
                 From("milestone").
                 Where(builder.In("name", opts.IncludeMilestones)))
     }
-    return sess
 }
 
-func applyProjectCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyProjectCondition(sess *xorm.Session, opts *IssuesOptions) {
     if opts.ProjectID > 0 { // specific project
         sess.Join("INNER", "project_issue", "issue.id = project_issue.issue_id").
             And("project_issue.project_id=?", opts.ProjectID)
@@ -166,10 +160,9 @@ func applyProjectCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Sessio
     }
     // opts.ProjectID == 0 means all projects,
     // do not need to apply any condition
-    return sess
 }
 
-func applyProjectColumnCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyProjectColumnCondition(sess *xorm.Session, opts *IssuesOptions) {
     // opts.ProjectColumnID == 0 means all project columns,
     // do not need to apply any condition
     if opts.ProjectColumnID > 0 {
@@ -177,10 +170,9 @@ func applyProjectColumnCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.
     } else if opts.ProjectColumnID == db.NoConditionID {
         sess.In("issue.id", builder.Select("issue_id").From("project_issue").Where(builder.Eq{"project_board_id": 0}))
     }
-    return sess
 }
 
-func applyRepoConditions(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyRepoConditions(sess *xorm.Session, opts *IssuesOptions) {
     if len(opts.RepoIDs) == 1 {
         opts.RepoCond = builder.Eq{"issue.repo_id": opts.RepoIDs[0]}
     } else if len(opts.RepoIDs) > 1 {
@@ -195,10 +187,9 @@ func applyRepoConditions(sess *xorm.Session, opts *IssuesOptions) *xorm.Session
     if opts.RepoCond != nil {
         sess.And(opts.RepoCond)
     }
-    return sess
 }
 
-func applyConditions(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyConditions(sess *xorm.Session, opts *IssuesOptions) {
     if len(opts.IssueIDs) > 0 {
         sess.In("issue.id", opts.IssueIDs)
     }
@@ -261,8 +252,6 @@ func applyConditions(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
     if opts.User != nil {
         sess.And(issuePullAccessibleRepoCond("issue.repo_id", opts.User.ID, opts.Org, opts.Team, opts.IsPull.Value()))
     }
-
-    return sess
 }
 
 // teamUnitsRepoCond returns query condition for those repo id in the special org team with special units access
@@ -339,22 +328,22 @@ func issuePullAccessibleRepoCond(repoIDstr string, userID int64, org *organizati
     return cond
 }
 
-func applyAssigneeCondition(sess *xorm.Session, assigneeID int64) *xorm.Session {
-    return sess.Join("INNER", "issue_assignees", "issue.id = issue_assignees.issue_id").
+func applyAssigneeCondition(sess *xorm.Session, assigneeID int64) {
+    sess.Join("INNER", "issue_assignees", "issue.id = issue_assignees.issue_id").
         And("issue_assignees.assignee_id = ?", assigneeID)
 }
 
-func applyPosterCondition(sess *xorm.Session, posterID int64) *xorm.Session {
-    return sess.And("issue.poster_id=?", posterID)
+func applyPosterCondition(sess *xorm.Session, posterID int64) {
+    sess.And("issue.poster_id=?", posterID)
 }
 
-func applyMentionedCondition(sess *xorm.Session, mentionedID int64) *xorm.Session {
-    return sess.Join("INNER", "issue_user", "issue.id = issue_user.issue_id").
+func applyMentionedCondition(sess *xorm.Session, mentionedID int64) {
+    sess.Join("INNER", "issue_user", "issue.id = issue_user.issue_id").
         And("issue_user.is_mentioned = ?", true).
         And("issue_user.uid = ?", mentionedID)
 }
 
-func applyReviewRequestedCondition(sess *xorm.Session, reviewRequestedID int64) *xorm.Session {
+func applyReviewRequestedCondition(sess *xorm.Session, reviewRequestedID int64) {
     existInTeamQuery := builder.Select("team_user.team_id").
         From("team_user").
         Where(builder.Eq{"team_user.uid": reviewRequestedID})
@@ -375,11 +364,11 @@ func applyReviewRequestedCondition(sess *xorm.Session, reviewRequestedID int64)
         ),
         builder.In("review.id", maxReview),
     ))
-    return sess.Where("issue.poster_id <> ?", reviewRequestedID).
+    sess.Where("issue.poster_id <> ?", reviewRequestedID).
         And(builder.In("issue.id", subQuery))
 }
 
-func applyReviewedCondition(sess *xorm.Session, reviewedID int64) *xorm.Session {
+func applyReviewedCondition(sess *xorm.Session, reviewedID int64) {
     // Query for pull requests where you are a reviewer or commenter, excluding
     // any pull requests already returned by the review requested filter.
     notPoster := builder.Neq{"issue.poster_id": reviewedID}
@@ -406,11 +395,11 @@ func applyReviewedCondition(sess *xorm.Session, reviewedID int64) *xorm.Session
             builder.In("type", CommentTypeComment, CommentTypeCode, CommentTypeReview),
         )),
     )
-    return sess.And(notPoster, builder.Or(reviewed, commented))
+    sess.And(notPoster, builder.Or(reviewed, commented))
 }
 
-func applySubscribedCondition(sess *xorm.Session, subscriberID int64) *xorm.Session {
-    return sess.And(
+func applySubscribedCondition(sess *xorm.Session, subscriberID int64) {
+    sess.And(
         builder.
             NotIn("issue.id",
                 builder.Select("issue_id").


@@ -28,7 +28,7 @@ type PullRequestsOptions struct {
     MilestoneID int64
 }
 
-func listPullRequestStatement(ctx context.Context, baseRepoID int64, opts *PullRequestsOptions) (*xorm.Session, error) {
+func listPullRequestStatement(ctx context.Context, baseRepoID int64, opts *PullRequestsOptions) *xorm.Session {
     sess := db.GetEngine(ctx).Where("pull_request.base_repo_id=?", baseRepoID)
 
     sess.Join("INNER", "issue", "pull_request.issue_id = issue.id")
@@ -46,7 +46,7 @@ func listPullRequestStatement(ctx context.Context, baseRepoID int64, opts *PullR
         sess.And("issue.milestone_id=?", opts.MilestoneID)
     }
 
-    return sess, nil
+    return sess
 }
 
 func GetUnmergedPullRequestsByHeadInfoMax(ctx context.Context, repoID, olderThan int64, branch string) ([]*PullRequest, error) {
@@ -136,23 +136,15 @@ func PullRequests(ctx context.Context, baseRepoID int64, opts *PullRequestsOptio
         opts.Page = 1
     }
 
-    countSession, err := listPullRequestStatement(ctx, baseRepoID, opts)
-    if err != nil {
-        log.Error("listPullRequestStatement: %v", err)
-        return nil, 0, err
-    }
+    countSession := listPullRequestStatement(ctx, baseRepoID, opts)
     maxResults, err := countSession.Count(new(PullRequest))
     if err != nil {
         log.Error("Count PRs: %v", err)
         return nil, maxResults, err
     }
 
-    findSession, err := listPullRequestStatement(ctx, baseRepoID, opts)
+    findSession := listPullRequestStatement(ctx, baseRepoID, opts)
     applySorts(findSession, opts.SortType, 0)
-    if err != nil {
-        log.Error("listPullRequestStatement: %v", err)
-        return nil, maxResults, err
-    }
     findSession = db.SetSessionPagination(findSession, opts)
     prs := make([]*PullRequest, 0, opts.PageSize)
     return prs, maxResults, findSession.Find(&prs)


@@ -18,7 +18,7 @@ func parseIntParam(value, param, algorithmName, config string, previousErr error
     return parsed, previousErr // <- Keep the previous error as this function should still return an error once everything has been checked if any call failed
 }
 
-func parseUIntParam(value, param, algorithmName, config string, previousErr error) (uint64, error) {
+func parseUIntParam(value, param, algorithmName, config string, previousErr error) (uint64, error) { //nolint:unparam
     parsed, err := strconv.ParseUint(value, 10, 64)
     if err != nil {
         log.Error("invalid integer for %s representation in %s hash spec %s", param, algorithmName, config)


@@ -42,20 +42,19 @@ var (
 )
 
 // loadGitVersion returns current Git version from shell. Internal usage only.
-func loadGitVersion() (*version.Version, error) {
+func loadGitVersion() error {
     // doesn't need RWMutex because it's executed by Init()
     if gitVersion != nil {
-        return gitVersion, nil
+        return nil
     }
 
     stdout, _, runErr := NewCommand(DefaultContext, "version").RunStdString(nil)
     if runErr != nil {
-        return nil, runErr
+        return runErr
     }
 
     fields := strings.Fields(stdout)
     if len(fields) < 3 {
-        return nil, fmt.Errorf("invalid git version output: %s", stdout)
+        return fmt.Errorf("invalid git version output: %s", stdout)
     }
 
     var versionString string
@@ -70,7 +69,7 @@ func loadGitVersion() (*version.Version, error) {
     var err error
     gitVersion, err = version.NewVersion(versionString)
-    return gitVersion, err
+    return err
 }
 
 // SetExecutablePath changes the path of git executable and checks the file permission and version.
@@ -85,7 +84,7 @@ func SetExecutablePath(path string) error {
     }
     GitExecutable = absPath
 
-    _, err = loadGitVersion()
+    err = loadGitVersion()
     if err != nil {
         return fmt.Errorf("unable to load git version: %w", err)
     }
@@ -312,7 +311,7 @@ func syncGitConfig() (err error) {
 
 // CheckGitVersionAtLeast check git version is at least the constraint version
 func CheckGitVersionAtLeast(atLeast string) error {
-    if _, err := loadGitVersion(); err != nil {
+    if err := loadGitVersion(); err != nil {
         return err
     }
     atLeastVersion, err := version.NewVersion(atLeast)
@@ -327,7 +326,7 @@ func CheckGitVersionAtLeast(atLeast string) error {
 
 // CheckGitVersionEqual checks if the git version is equal to the constraint version.
 func CheckGitVersionEqual(equal string) error {
-    if _, err := loadGitVersion(); err != nil {
+    if err := loadGitVersion(); err != nil {
         return err
     }
     atLeastVersion, err := version.NewVersion(equal)


@@ -34,13 +34,13 @@ type ObjectFormat interface {
     ComputeHash(t ObjectType, content []byte) ObjectID
 }
 
-func computeHash(dst []byte, hasher hash.Hash, t ObjectType, content []byte) []byte {
+func computeHash(dst []byte, hasher hash.Hash, t ObjectType, content []byte) {
     _, _ = hasher.Write(t.Bytes())
     _, _ = hasher.Write([]byte(" "))
     _, _ = hasher.Write([]byte(strconv.Itoa(len(content))))
     _, _ = hasher.Write([]byte{0})
     _, _ = hasher.Write(content)
-    return hasher.Sum(dst)
+    hasher.Sum(dst)
 }
 
 /* SHA1 Type */


@@ -48,7 +48,7 @@ func (r *HTMLRenderer) renderCodeSpan(w util.BufWriter, source []byte, n ast.Nod
     return ast.WalkContinue, nil
 }
 
-func (g *ASTTransformer) transformCodeSpan(ctx *markup.RenderContext, v *ast.CodeSpan, reader text.Reader) {
+func (g *ASTTransformer) transformCodeSpan(_ *markup.RenderContext, v *ast.CodeSpan, reader text.Reader) {
     colorContent := v.Text(reader.Source())
     if matchColor(strings.ToLower(string(colorContent))) {
         v.AppendChild(v, NewColorPreview(colorContent))


@@ -185,8 +185,6 @@ func ParseDescription(r io.Reader) (*Package, error) {
 }
 
 func setField(p *Package, data string) error {
-    const listDelimiter = ", "
-
     if data == "" {
         return nil
     }
@@ -215,19 +213,19 @@ func setField(p *Package, data string) error {
     case "Description":
         p.Metadata.Description = value
     case "URL":
-        p.Metadata.ProjectURL = splitAndTrim(value, listDelimiter)
+        p.Metadata.ProjectURL = splitAndTrim(value)
     case "License":
         p.Metadata.License = value
     case "Author":
-        p.Metadata.Authors = splitAndTrim(authorReplacePattern.ReplaceAllString(value, ""), listDelimiter)
+        p.Metadata.Authors = splitAndTrim(authorReplacePattern.ReplaceAllString(value, ""))
     case "Depends":
-        p.Metadata.Depends = splitAndTrim(value, listDelimiter)
+        p.Metadata.Depends = splitAndTrim(value)
     case "Imports":
-        p.Metadata.Imports = splitAndTrim(value, listDelimiter)
+        p.Metadata.Imports = splitAndTrim(value)
     case "Suggests":
-        p.Metadata.Suggests = splitAndTrim(value, listDelimiter)
+        p.Metadata.Suggests = splitAndTrim(value)
     case "LinkingTo":
-        p.Metadata.LinkingTo = splitAndTrim(value, listDelimiter)
+        p.Metadata.LinkingTo = splitAndTrim(value)
     case "NeedsCompilation":
         p.Metadata.NeedsCompilation = value == "yes"
     }
@@ -235,8 +233,8 @@ func setField(p *Package, data string) error {
     return nil
 }
 
-func splitAndTrim(s, sep string) []string {
-    items := strings.Split(s, sep)
+func splitAndTrim(s string) []string {
+    items := strings.Split(s, ", ")
     for i := range items {
         items[i] = strings.TrimSpace(items[i])
     }


@@ -97,7 +97,7 @@ func decodeEnvSectionKey(encoded string) (ok bool, section, key string) {
 
 // decodeEnvironmentKey decode the environment key to section and key
 // The environment key is in the form of GITEA__SECTION__KEY or GITEA__SECTION__KEY__FILE
-func decodeEnvironmentKey(prefixRegexp *regexp.Regexp, suffixFile, envKey string) (ok bool, section, key string, useFileValue bool) {
+func decodeEnvironmentKey(prefixRegexp *regexp.Regexp, suffixFile, envKey string) (ok bool, section, key string, useFileValue bool) { //nolint:unparam
     if strings.HasSuffix(envKey, suffixFile) {
         useFileValue = true
         envKey = envKey[:len(envKey)-len(suffixFile)]


@@ -122,7 +122,7 @@ const (
     targetSecIsSec // target section is from the name seciont [name]
 )
 
-func getStorageSectionByType(rootCfg ConfigProvider, typ string) (ConfigSection, targetSecType, error) {
+func getStorageSectionByType(rootCfg ConfigProvider, typ string) (ConfigSection, targetSecType, error) { //nolint:unparam
     targetSec, err := rootCfg.GetSection(storageSectionName + "." + typ)
     if err != nil {
         if !IsValidStorageType(StorageType(typ)) {


@@ -15,10 +15,7 @@ import (
 // GenerateKeyPair generates a public and private keypair
 func GenerateKeyPair(bits int) (string, string, error) {
     priv, _ := rsa.GenerateKey(rand.Reader, bits)
-    privPem, err := pemBlockForPriv(priv)
-    if err != nil {
-        return "", "", err
-    }
+    privPem := pemBlockForPriv(priv)
     pubPem, err := pemBlockForPub(&priv.PublicKey)
     if err != nil {
         return "", "", err
@@ -26,12 +23,12 @@ func GenerateKeyPair(bits int) (string, string, error) {
     return privPem, pubPem, nil
 }
 
-func pemBlockForPriv(priv *rsa.PrivateKey) (string, error) {
+func pemBlockForPriv(priv *rsa.PrivateKey) string {
     privBytes := pem.EncodeToMemory(&pem.Block{
         Type: "RSA PRIVATE KEY",
         Bytes: x509.MarshalPKCS1PrivateKey(priv),
     })
-    return string(privBytes), nil
+    return string(privBytes)
 }
 
 func pemBlockForPub(pub *rsa.PublicKey) (string, error) {


@@ -241,16 +241,12 @@ func (ar artifactRoutes) uploadArtifact(ctx *ArtifactContext) {
     }
 
     // get upload file size
-    fileRealTotalSize, contentLength, err := getUploadFileSize(ctx)
-    if err != nil {
-        log.Error("Error get upload file size: %v", err)
-        ctx.Error(http.StatusInternalServerError, "Error get upload file size")
-        return
-    }
+    fileRealTotalSize, contentLength := getUploadFileSize(ctx)
 
     // get artifact retention days
     expiredDays := setting.Actions.ArtifactRetentionDays
     if queryRetentionDays := ctx.Req.URL.Query().Get("retentionDays"); queryRetentionDays != "" {
+        var err error
         expiredDays, err = strconv.ParseInt(queryRetentionDays, 10, 64)
         if err != nil {
             log.Error("Error parse retention days: %v", err)


@@ -43,7 +43,7 @@ func validateRunID(ctx *ArtifactContext) (*actions.ActionTask, int64, bool) {
     return task, runID, true
 }
 
-func validateRunIDV4(ctx *ArtifactContext, rawRunID string) (*actions.ActionTask, int64, bool) {
+func validateRunIDV4(ctx *ArtifactContext, rawRunID string) (*actions.ActionTask, int64, bool) { //nolint:unparam
     task := ctx.ActionTask
     runID, err := strconv.ParseInt(rawRunID, 10, 64)
     if err != nil || task.Job.RunID != runID {
@@ -84,11 +84,11 @@ func parseArtifactItemPath(ctx *ArtifactContext) (string, string, bool) {
 
 // getUploadFileSize returns the size of the file to be uploaded.
 // The raw size is the size of the file as reported by the header X-TFS-FileLength.
-func getUploadFileSize(ctx *ArtifactContext) (int64, int64, error) {
+func getUploadFileSize(ctx *ArtifactContext) (int64, int64) {
     contentLength := ctx.Req.ContentLength
     xTfsLength, _ := strconv.ParseInt(ctx.Req.Header.Get(artifactXTfsFileLengthHeader), 10, 64)
     if xTfsLength > 0 {
-        return xTfsLength, contentLength, nil
+        return xTfsLength, contentLength
     }
-    return contentLength, contentLength, nil
+    return contentLength, contentLength
 }


@@ -26,7 +26,7 @@ var uploadVersionMutex sync.Mutex
 
 // saveAsPackageBlob creates a package blob from an upload
 // The uploaded blob gets stored in a special upload version to link them to the package/image
-func saveAsPackageBlob(ctx context.Context, hsr packages_module.HashedSizeReader, pci *packages_service.PackageCreationInfo) (*packages_model.PackageBlob, error) {
+func saveAsPackageBlob(ctx context.Context, hsr packages_module.HashedSizeReader, pci *packages_service.PackageCreationInfo) (*packages_model.PackageBlob, error) { //nolint:unparam
     pb := packages_service.NewPackageBlob(hsr)
 
     exists := false


@@ -36,7 +36,7 @@ func apiError(ctx *context.Context, status int, obj any) {
     })
 }
 
-func xmlResponse(ctx *context.Context, status int, obj any) {
+func xmlResponse(ctx *context.Context, status int, obj any) { //nolint:unparam
     ctx.Resp.Header().Set("Content-Type", "application/atom+xml; charset=utf-8")
     ctx.Resp.WriteHeader(status)
     if _, err := ctx.Resp.Write([]byte(xml.Header)); err != nil {


@@ -64,7 +64,7 @@ func CompareDiff(ctx *context.APIContext) {
         }
     }
 
-    _, _, headGitRepo, ci, _, _ := parseCompareInfo(ctx, api.CreatePullRequestOption{
+    _, headGitRepo, ci, _, _ := parseCompareInfo(ctx, api.CreatePullRequestOption{
         Base: infos[0],
         Head: infos[1],
     })


@@ -406,7 +406,7 @@ func CreatePullRequest(ctx *context.APIContext) {
     )
 
     // Get repo/branch information
-    _, headRepo, headGitRepo, compareInfo, baseBranch, headBranch := parseCompareInfo(ctx, form)
+    headRepo, headGitRepo, compareInfo, baseBranch, headBranch := parseCompareInfo(ctx, form)
     if ctx.Written() {
         return
     }
@@ -1051,7 +1051,7 @@ func MergePullRequest(ctx *context.APIContext) {
     ctx.Status(http.StatusOK)
 }
 
-func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption) (*user_model.User, *repo_model.Repository, *git.Repository, *git.CompareInfo, string, string) {
+func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption) (*repo_model.Repository, *git.Repository, *git.CompareInfo, string, string) {
     baseRepo := ctx.Repo.Repository
 
     // Get compared branches information
@@ -1084,14 +1084,14 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
             } else {
                 ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
             }
-            return nil, nil, nil, nil, "", ""
+            return nil, nil, nil, "", ""
         }
         headBranch = headInfos[1]
         // The head repository can also point to the same repo
         isSameRepo = ctx.Repo.Owner.ID == headUser.ID
     } else {
         ctx.NotFound()
-        return nil, nil, nil, nil, "", ""
+        return nil, nil, nil, "", ""
     }
 
     ctx.Repo.PullRequest.SameRepo = isSameRepo
@@ -1099,7 +1099,7 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
     // Check if base branch is valid.
     if !ctx.Repo.GitRepo.IsBranchExist(baseBranch) && !ctx.Repo.GitRepo.IsTagExist(baseBranch) {
         ctx.NotFound("BaseNotExist")
-        return nil, nil, nil, nil, "", ""
+        return nil, nil, nil, "", ""
     }
 
     // Check if current user has fork of repository or in the same repository.
@@ -1107,7 +1107,7 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
     if headRepo == nil && !isSameRepo {
         log.Trace("parseCompareInfo[%d]: does not have fork or in same repository", baseRepo.ID)
         ctx.NotFound("GetForkedRepo")
-        return nil, nil, nil, nil, "", ""
+        return nil, nil, nil, "", ""
     }
 
     var headGitRepo *git.Repository
@@ -1118,7 +1118,7 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
         headGitRepo, err = gitrepo.OpenRepository(ctx, headRepo)
         if err != nil {
             ctx.Error(http.StatusInternalServerError, "OpenRepository", err)
-            return nil, nil, nil, nil, "", ""
+            return nil, nil, nil, "", ""
         }
     }
@@ -1127,7 +1127,7 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
     if err != nil {
         headGitRepo.Close()
         ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
-        return nil, nil, nil, nil, "", ""
+        return nil, nil, nil, "", ""
     }
     if !permBase.CanReadIssuesOrPulls(true) || !permBase.CanRead(unit.TypeCode) {
         if log.IsTrace() {
@@ -1138,7 +1138,7 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
         }
         headGitRepo.Close()
         ctx.NotFound("Can't read pulls or can't read UnitTypeCode")
-        return nil, nil, nil, nil, "", ""
+        return nil, nil, nil, "", ""
     }
 
     // user should have permission to read headrepo's codes
@@ -1146,7 +1146,7 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
     if err != nil {
         headGitRepo.Close()
         ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
-        return nil, nil, nil, nil, "", ""
+        return nil, nil, nil, "", ""
     }
     if !permHead.CanRead(unit.TypeCode) {
         if log.IsTrace() {
@@ -1157,24 +1157,24 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
         }
         headGitRepo.Close()
         ctx.NotFound("Can't read headRepo UnitTypeCode")
-        return nil, nil, nil, nil, "", ""
+        return nil, nil, nil, "", ""
     }
 
     // Check if head branch is valid.
     if !headGitRepo.IsBranchExist(headBranch) && !headGitRepo.IsTagExist(headBranch) {
         headGitRepo.Close()
         ctx.NotFound()
-        return nil, nil, nil, nil, "", ""
+        return nil, nil, nil, "", ""
     }
 
     compareInfo, err := headGitRepo.GetCompareInfo(repo_model.RepoPath(baseRepo.Owner.Name, baseRepo.Name), baseBranch, headBranch, false, false)
     if err != nil {
         headGitRepo.Close()
         ctx.Error(http.StatusInternalServerError, "GetCompareInfo", err)
-        return nil, nil, nil, nil, "", ""
+        return nil, nil, nil, "", ""
     }
 
-    return headUser, headRepo, headGitRepo, compareInfo, baseBranch, headBranch
+    return headRepo, headGitRepo, compareInfo, baseBranch, headBranch
 }
 
 // UpdatePullRequest merge PR's baseBranch into headBranch


@@ -434,7 +434,7 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
     }
 }
 
-func preReceiveTag(ctx *preReceiveContext, oldCommitID, newCommitID string, refFullName git.RefName) {
+func preReceiveTag(ctx *preReceiveContext, oldCommitID, newCommitID string, refFullName git.RefName) { //nolint:unparam
     if !ctx.AssertCanWriteCode() {
         return
     }
@@ -470,7 +470,7 @@ func preReceiveTag(ctx *preReceiveContext, oldCommitID, newCommitID string, refF
     }
 }
 
-func preReceiveFor(ctx *preReceiveContext, oldCommitID, newCommitID string, refFullName git.RefName) {
+func preReceiveFor(ctx *preReceiveContext, oldCommitID, newCommitID string, refFullName git.RefName) { //nolint:unparam
     if !ctx.AssertCreatePullRequest() {
         return
     }


@@ -183,7 +183,7 @@ func ChangeConfig(ctx *context.Context) {
     value := ctx.FormString("value")
     cfg := setting.Config()
 
-    marshalBool := func(v string) (string, error) {
+    marshalBool := func(v string) (string, error) { //nolint:unparam
         if b, _ := strconv.ParseBool(v); b {
             return "true", nil
         }


@@ -39,7 +39,7 @@ func redirectToBadge(ctx *context_module.Context, label, text, color string) {
     ctx.Redirect(getBadgeURL(ctx, label, text, color))
 }
 
-func errorBadge(ctx *context_module.Context, label, text string) {
+func errorBadge(ctx *context_module.Context, label, text string) { //nolint:unparam
     ctx.Redirect(getBadgeURL(ctx, label, text, "crimson"))
 }


@@ -474,7 +474,7 @@ func handleSchedules(
     detectedWorkflows []*actions_module.DetectedWorkflow,
     commit *git.Commit,
     input *notifyInput,
-    ref string,
+    _ string,
 ) error {
     branch, err := commit.GetBranchName()
     if err != nil {


@@ -92,7 +92,7 @@ func (o *repository) GetRepositoryPushURL() string {
     return o.getURL()
 }
 
-func newRepository(ctx context.Context) generic.NodeDriverInterface {
+func newRepository(_ context.Context) generic.NodeDriverInterface {
     r := &repository{
         f: &f3.Repository{},
     }


@@ -14,7 +14,7 @@ import (
     "code.forgejo.org/f3/gof3/v3/options"
 )
 
-func newTestOptions(t *testing.T) options.Interface {
+func newTestOptions(_ *testing.T) options.Interface {
     o := options.GetFactory(driver_options.Name)().(*driver_options.Options)
     o.SetLogger(util.NewF3Logger(nil, forgejo_log.GetLogger(forgejo_log.DEFAULT)))
     return o


@@ -45,7 +45,7 @@ func MailNewUser(ctx context.Context, u *user_model.User) {
     }
 }
 
-func mailNewUser(ctx context.Context, u *user_model.User, lang string, tos []string) {
+func mailNewUser(_ context.Context, u *user_model.User, lang string, tos []string) {
     locale := translation.NewLocale(lang)
 
     manageUserURL := setting.AppURL + "admin/users/" + strconv.FormatInt(u.ID, 10)


@@ -252,7 +252,7 @@ func Merge(ctx context.Context, pr *issues_model.PullRequest, doer *user_model.U
 }
 
 // doMergeAndPush performs the merge operation without changing any pull information in database and pushes it up to the base repository
-func doMergeAndPush(ctx context.Context, pr *issues_model.PullRequest, doer *user_model.User, mergeStyle repo_model.MergeStyle, expectedHeadCommitID, message string, pushTrigger repo_module.PushTrigger) (string, error) {
+func doMergeAndPush(ctx context.Context, pr *issues_model.PullRequest, doer *user_model.User, mergeStyle repo_model.MergeStyle, expectedHeadCommitID, message string, pushTrigger repo_module.PushTrigger) (string, error) { //nolint:unparam
     // Clone base repo.
     mergeCtx, cancel, err := createTemporaryRepoForMerge(ctx, pr, doer, expectedHeadCommitID)
     if err != nil {


@@ -84,7 +84,7 @@ func MaybePromoteRemoteUser(ctx context.Context, source *auth_model.Source, logi
     return true, reason, nil
 }
 
-func getRemoteUserToPromote(ctx context.Context, source *auth_model.Source, loginName, email string) (*user_model.User, Reason, error) {
+func getRemoteUserToPromote(ctx context.Context, source *auth_model.Source, loginName, email string) (*user_model.User, Reason, error) { //nolint:unparam
     if !source.IsOAuth2() {
         return nil, NewReason(log.DEBUG, ReasonNotAuth2, "source %v is not OAuth2", source), nil
     }