format with gofumpt (#18184)

* gofumpt -w -l .
* gofumpt -w -l -extra .
* Add linter
* manual fix
* change make fmt

This commit is contained in:
parent 1d98d205f5
commit 54e9ee37a7

423 changed files with 1585 additions and 1758 deletions
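Most of those files follow the same handful of mechanical rewrites, visible in the hunks below: a space after `//` in comments, `0o`-prefixed octal literals, short `x := ...` declarations instead of `var x = ...`, regrouped imports, ungrouped single-declaration `var` blocks, and blank lines dropped at block boundaries and before error checks. As a rough sketch of the resulting style (the package and identifiers here are invented for illustration, not taken from this commit):

```go
// Package example sketches the formatting style enforced by this commit.
// MakeRepoDirs and its parameters are illustrative names only.
package example

import "os"

// MakeRepoDirs creates each directory with mode 0o755.
// (before this commit: "//MakeRepoDirs ..." with no space after the slashes)
func MakeRepoDirs(paths []string) error {
	perm := os.FileMode(0o755) // before: var perm = os.FileMode(0755)
	for _, p := range paths {
		if err := os.MkdirAll(p, perm); err != nil {
			return err
		}
	}
	return nil
}
```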
@@ -12,9 +12,9 @@ import (
 	"code.gitea.io/gitea/models/db"
 	"code.gitea.io/gitea/modules/timeutil"
-	"xorm.io/xorm"
 
 	"github.com/duo-labs/webauthn/webauthn"
+	"xorm.io/xorm"
 )
 
 // ErrWebAuthnCredentialNotExist represents a "ErrWebAuthnCRedentialNotExist" kind of error.

@@ -30,14 +30,14 @@ func (err ErrWebAuthnCredentialNotExist) Error() string {
 	return fmt.Sprintf("WebAuthn credential does not exist [credential_id: %s]", err.CredentialID)
 }
 
-//IsErrWebAuthnCredentialNotExist checks if an error is a ErrWebAuthnCredentialNotExist.
+// IsErrWebAuthnCredentialNotExist checks if an error is a ErrWebAuthnCredentialNotExist.
 func IsErrWebAuthnCredentialNotExist(err error) bool {
 	_, ok := err.(ErrWebAuthnCredentialNotExist)
 	return ok
 }
 
-//WebAuthnCredential represents the WebAuthn credential data for a public-key
-//credential conformant to WebAuthn Level 1
+// WebAuthnCredential represents the WebAuthn credential data for a public-key
+// credential conformant to WebAuthn Level 1
 type WebAuthnCredential struct {
 	ID   int64 `xorm:"pk autoincr"`
 	Name string

@@ -109,7 +109,7 @@ func (list WebAuthnCredentialList) ToCredentials() []webauthn.Credential {
 	return creds
 }
 
-//GetWebAuthnCredentialsByUID returns all WebAuthn credentials of the given user
+// GetWebAuthnCredentialsByUID returns all WebAuthn credentials of the given user
 func GetWebAuthnCredentialsByUID(uid int64) (WebAuthnCredentialList, error) {
 	return getWebAuthnCredentialsByUID(db.DefaultContext, uid)
 }

@@ -119,7 +119,7 @@ func getWebAuthnCredentialsByUID(ctx context.Context, uid int64) (WebAuthnCreden
 	return creds, db.GetEngine(ctx).Where("user_id = ?", uid).Find(&creds)
 }
 
-//ExistsWebAuthnCredentialsForUID returns if the given user has credentials
+// ExistsWebAuthnCredentialsForUID returns if the given user has credentials
 func ExistsWebAuthnCredentialsForUID(uid int64) (bool, error) {
 	return existsWebAuthnCredentialsByUID(db.DefaultContext, uid)
 }

@@ -211,7 +211,7 @@ func deleteCredential(ctx context.Context, id, userID int64) (bool, error) {
 	return had > 0, err
 }
 
-//WebAuthnCredentials implementns the webauthn.User interface
+// WebAuthnCredentials implementns the webauthn.User interface
 func WebAuthnCredentials(userID int64) ([]webauthn.Credential, error) {
 	dbCreds, err := GetWebAuthnCredentialsByUID(userID)
 	if err != nil {

@@ -175,12 +175,12 @@ func generateEmailAvatarLink(email string, size int, final bool) string {
 	return DefaultAvatarLink()
 }
 
-//GenerateEmailAvatarFastLink returns a avatar link (fast, the link may be a delegated one: "/avatar/${hash}")
+// GenerateEmailAvatarFastLink returns a avatar link (fast, the link may be a delegated one: "/avatar/${hash}")
 func GenerateEmailAvatarFastLink(email string, size int) string {
 	return generateEmailAvatarLink(email, size, false)
 }
 
-//GenerateEmailAvatarFinalLink returns a avatar final link (maybe slow)
+// GenerateEmailAvatarFinalLink returns a avatar final link (maybe slow)
 func GenerateEmailAvatarFinalLink(email string, size int) string {
 	return generateEmailAvatarLink(email, size, true)
 }

@@ -240,7 +240,6 @@ func FixIssueLabelWithOutsideLabels() (int64, error) {
 		WHERE
 			(label.org_id = 0 AND issue.repo_id != label.repo_id) OR (label.repo_id = 0 AND label.org_id != repository.owner_id)
 	) AS il_too )`)
-
 	if err != nil {
 		return 0, err
 	}

@@ -121,7 +121,7 @@ func newXORMEngine() (*xorm.Engine, error) {
 	return engine, nil
 }
 
-//SyncAllTables sync the schemas of all tables, is required by unit test code
+// SyncAllTables sync the schemas of all tables, is required by unit test code
 func SyncAllTables() error {
 	return x.StoreEngine("InnoDB").Sync2(tables...)
 }

@@ -394,7 +394,6 @@ func TestIssue_InsertIssue(t *testing.T) {
 	issue = testInsertIssue(t, `my issue2, this is my son's love \n \r \ `, "special issue's '' comments?", 7)
 	_, err = db.GetEngine(db.DefaultContext).ID(issue.ID).Delete(new(Issue))
 	assert.NoError(t, err)
-
 }
 
 func TestIssue_ResolveMentions(t *testing.T) {

@@ -155,7 +155,6 @@ func FetchIssueContentHistoryList(dbCtx context.Context, issueID, commentID int6
 		Where(builder.Eq{"issue_id": issueID, "comment_id": commentID}).
 		OrderBy("edited_unix DESC").
 		Find(&res)
-
 	if err != nil {
 		log.Error("can not fetch issue content history list. err=%v", err)
 		return nil, err

@@ -209,7 +209,7 @@ func LFSAutoAssociate(metas []*LFSMetaObject, user *user_model.User, repoID int6
 func IterateLFS(f func(mo *LFSMetaObject) error) error {
 	var start int
 	const batchSize = 100
-	var e = db.GetEngine(db.DefaultContext)
+	e := db.GetEngine(db.DefaultContext)
 	for {
 		mos := make([]*LFSMetaObject, 0, batchSize)
 		if err := e.Limit(batchSize, start).Find(&mos); err != nil {

@@ -45,7 +45,7 @@ func assertCreateIssues(t *testing.T, reponame string, isPull bool) {
 	}
 
 	title := "issuetitle1"
-	var is = &Issue{
+	is := &Issue{
 		RepoID:      repo.ID,
 		MilestoneID: milestone.ID,
 		Repo:        repo,

@@ -130,7 +130,7 @@ func TestMigrate_InsertPullRequests(t *testing.T) {
 	repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{Name: reponame}).(*repo_model.Repository)
 	owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID}).(*user_model.User)
 
-	var i = &Issue{
+	i := &Issue{
 		RepoID: repo.ID,
 		Repo:   repo,
 		Title:  "title1",

@@ -140,7 +140,7 @@ func TestMigrate_InsertPullRequests(t *testing.T) {
 		Poster: owner,
 	}
 
-	var p = &PullRequest{
+	p := &PullRequest{
 		Issue: i,
 	}
 

@@ -219,10 +219,10 @@ func prepareTestEnv(t *testing.T, skip int, syncModels ...interface{}) (*xorm.En
 			assert.NoError(t, err, "unable to read the new repo root: %v\n", err)
 		}
 		for _, repoDir := range repoDirs {
-			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0755)
-			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0755)
-			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0755)
-			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0755)
+			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0o755)
+			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0o755)
+			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0o755)
+			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0o755)
 		}
 	}
 

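The `0755` → `0o755` changes in the hunk above (and in the similar test-setup hunks further down) are purely notational: the `0o` prefix makes the octal base explicit, while the numeric value of the mode is unchanged. A tiny standalone check, not part of the commit, illustrates this:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Both literals denote the same octal value (decimal 493),
	// so the rewritten MkdirAll calls behave identically.
	fmt.Println(os.FileMode(0o755) == os.FileMode(0755)) // true
	fmt.Println(0o755 == 0755)                           // true
}
```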
@@ -170,7 +170,7 @@ func (log *TestLogger) Init(config string) error {
 func (log *TestLogger) Flush() {
 }
 
-//ReleaseReopen does nothing
+// ReleaseReopen does nothing
 func (log *TestLogger) ReleaseReopen() error {
 	return nil
 }

@@ -124,5 +124,4 @@ func Test_removeInvalidLabels(t *testing.T) {
 			t.Errorf("IssueLabel[%d] was deleted but should have remained", id)
 		}
 	}
-
 }

@@ -85,5 +85,4 @@ func Test_deleteOrphanedIssueLabels(t *testing.T) {
 		pre := preMigration[id]
 		assert.Equal(t, pre, post, "migration changed issueLabel %d", id)
 	}
-
 }

@@ -38,7 +38,7 @@ func Test_addPrimaryEmail2EmailAddress(t *testing.T) {
 		IsPrimary bool `xorm:"DEFAULT(false) NOT NULL"`
 	}
 
-	var users = make([]User, 0, 20)
+	users := make([]User, 0, 20)
 	err = x.Find(&users)
 	assert.NoError(t, err)
 

@@ -37,10 +37,10 @@ func Test_addIssueResourceIndexTable(t *testing.T) {
 		MaxIndex int64 `xorm:"index"`
 	}
 
-	var start = 0
+	start := 0
 	const batchSize = 1000
 	for {
-		var indexes = make([]ResourceIndex, 0, batchSize)
+		indexes := make([]ResourceIndex, 0, batchSize)
 		err := x.Table("issue_index").Limit(batchSize, start).Find(&indexes)
 		assert.NoError(t, err)
 

@@ -26,7 +26,6 @@ func (ls *LoginSourceOriginalV189) TableName() string {
 }
 
 func Test_unwrapLDAPSourceCfg(t *testing.T) {
-
 	// Prepare and load the testing database
 	x, deferable := prepareTestEnv(t, 0, new(LoginSourceOriginalV189))
 	if x == nil || t.Failed() {

@@ -80,5 +79,4 @@ func Test_unwrapLDAPSourceCfg(t *testing.T) {
 			assert.EqualValues(t, source.ID%2 == 0, source.IsActive, "unwrapLDAPSourceCfg failed for %d", source.ID)
 		}
 	}
-
 }

@@ -11,7 +11,6 @@ import (
 )
 
 func alterIssueAndCommentTextFieldsToLongText(x *xorm.Engine) error {
-
 	sess := x.NewSession()
 	defer sess.Close()
 	if err := sess.Begin(); err != nil {

@@ -40,10 +40,10 @@ func Test_addTableCommitStatusIndex(t *testing.T) {
 		MaxIndex int64 `xorm:"index"`
 	}
 
-	var start = 0
+	start := 0
 	const batchSize = 1000
 	for {
-		var indexes = make([]CommitStatusIndex, 0, batchSize)
+		indexes := make([]CommitStatusIndex, 0, batchSize)
 		err := x.Table("commit_status_index").Limit(batchSize, start).Find(&indexes)
 		assert.NoError(t, err)
 

@@ -16,7 +16,6 @@ import (
 )
 
 func addWebAuthnCred(x *xorm.Engine) error {
-
 	// Create webauthnCredential table
 	type webauthnCredential struct {
 		ID int64 `xorm:"pk autoincr"`

@@ -12,7 +12,6 @@ import (
 )
 
 func useBase32HexForCredIDInWebAuthnCredential(x *xorm.Engine) error {
-
 	// Create webauthnCredential table
 	type webauthnCredential struct {
 		ID int64 `xorm:"pk autoincr"`

@@ -718,7 +718,7 @@ func DeleteRepository(doer *user_model.User, uid, repoID int64) error {
 	if err != nil {
 		return fmt.Errorf("listDeployKeys: %v", err)
 	}
-	var needRewriteKeysFile = len(deployKeys) > 0
+	needRewriteKeysFile := len(deployKeys) > 0
 	for _, dKey := range deployKeys {
 		if err := DeleteDeployKey(ctx, doer, dKey.ID); err != nil {
 			return fmt.Errorf("deleteDeployKeys: %v", err)

@@ -844,7 +844,7 @@ func DeleteRepository(doer *user_model.User, uid, repoID int64) error {
 		return err
 	}
 
-	var lfsPaths = make([]string, 0, len(lfsObjects))
+	lfsPaths := make([]string, 0, len(lfsObjects))
 	for _, v := range lfsObjects {
 		count, err := sess.Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: v.Oid}})
 		if err != nil {

@@ -867,7 +867,7 @@ func DeleteRepository(doer *user_model.User, uid, repoID int64) error {
 		return err
 	}
 
-	var archivePaths = make([]string, 0, len(archives))
+	archivePaths := make([]string, 0, len(archives))
 	for _, v := range archives {
 		p, _ := v.RelativePath()
 		archivePaths = append(archivePaths, p)

@@ -893,7 +893,7 @@ func DeleteRepository(doer *user_model.User, uid, repoID int64) error {
 		return err
 	}
 
-	var newAttachmentPaths = make([]string, 0, len(newAttachments))
+	newAttachmentPaths := make([]string, 0, len(newAttachments))
 	for _, attach := range newAttachments {
 		newAttachmentPaths = append(newAttachmentPaths, attach.RelativePath())
 	}

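The four hunks above only swap `var x = expr` for the short declaration `x := expr` inside function bodies; the pre-sized slices and the loops that fill them are untouched. A minimal sketch of the pattern with invented names, not code from the commit:

```go
package example

// collectPaths mirrors the rewritten pattern: pre-size a slice with make,
// then append in a loop. Only the declaration form changed in the diff.
func collectPaths(items []string) []string {
	// before: var paths = make([]string, 0, len(items))
	paths := make([]string, 0, len(items))
	for _, it := range items {
		paths = append(paths, it)
	}
	return paths
}
```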
@@ -90,7 +90,7 @@ type FindRepoArchiversOption struct {
 }
 
 func (opts FindRepoArchiversOption) toConds() builder.Cond {
-	var cond = builder.NewCond()
+	cond := builder.NewCond()
 	if opts.OlderThan > 0 {
 		cond = cond.And(builder.Lt{"created_unix": time.Now().Add(-opts.OlderThan).Unix()})
 	}

@@ -99,7 +99,7 @@ func (opts FindRepoArchiversOption) toConds() builder.Cond {
 
 // FindRepoArchives find repo archivers
 func FindRepoArchives(opts FindRepoArchiversOption) ([]*RepoArchiver, error) {
-	var archivers = make([]*RepoArchiver, 0, opts.PageSize)
+	archivers := make([]*RepoArchiver, 0, opts.PageSize)
 	start, limit := opts.GetSkipTake()
 	err := db.GetEngine(db.DefaultContext).Where(opts.toConds()).
 		Asc("created_unix").

@@ -241,7 +241,7 @@ func UpdateAttachmentByUUID(ctx context.Context, attach *Attachment, cols ...str
 
 // UpdateAttachmentCtx updates the given attachment in database
 func UpdateAttachmentCtx(ctx context.Context, atta *Attachment) error {
-	var sess = db.GetEngine(ctx).Cols("name", "issue_id", "release_id", "comment_id", "download_count")
+	sess := db.GetEngine(ctx).Cols("name", "issue_id", "release_id", "comment_id", "download_count")
 	if atta.ID != 0 && atta.UUID == "" {
 		sess = sess.ID(atta.ID)
 	} else {

@@ -17,10 +17,8 @@ import (
 	"xorm.io/xorm"
 )
 
-var (
-	// ErrMirrorNotExist mirror does not exist error
-	ErrMirrorNotExist = errors.New("Mirror does not exist")
-)
+// ErrMirrorNotExist mirror does not exist error
+var ErrMirrorNotExist = errors.New("Mirror does not exist")
 
 // RemoteMirrorer defines base methods for pull/push mirrors.
 type RemoteMirrorer interface {

@@ -15,10 +15,8 @@ import (
 	"xorm.io/xorm"
 )
 
-var (
-	// ErrPushMirrorNotExist mirror does not exist error
-	ErrPushMirrorNotExist = errors.New("PushMirror does not exist")
-)
+// ErrPushMirrorNotExist mirror does not exist error
+var ErrPushMirrorNotExist = errors.New("PushMirror does not exist")
 
 // PushMirror represents mirror information of a repository.
 type PushMirror struct {

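The two mirror hunks above ungroup a parenthesized `var ( ... )` block that declares a single error value into a plain `var` statement, keeping the doc comment attached to the declaration. A minimal sketch of that rewrite, with an invented error name:

```go
package example

import "errors"

// Before this commit the declaration was grouped:
//
//	var (
//		// ErrThingNotExist thing does not exist error
//		ErrThingNotExist = errors.New("Thing does not exist")
//	)
//
// After, the single declaration stands on its own:

// ErrThingNotExist thing does not exist error
var ErrThingNotExist = errors.New("Thing does not exist")
```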
@@ -199,5 +199,4 @@ func TestDismissReview(t *testing.T) {
 	assert.False(t, rejectReviewExample.Dismissed)
 	assert.False(t, requestReviewExample.Dismissed)
 	assert.True(t, approveReviewExample.Dismissed)
-
 }

@@ -117,10 +117,10 @@ func MainTest(m *testing.M, pathToGiteaRoot string, fixtureFiles ...string) {
 			fatalTestError("unable to read the new repo root: %v\n", err)
 		}
 		for _, repoDir := range repoDirs {
-			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0755)
-			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0755)
-			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0755)
-			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0755)
+			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0o755)
+			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0o755)
+			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0o755)
+			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0o755)
 		}
 	}
 

@@ -182,10 +182,10 @@ func PrepareTestEnv(t testing.TB) {
 		repoDirs, err := os.ReadDir(filepath.Join(setting.RepoRootPath, ownerDir.Name()))
 		assert.NoError(t, err)
 		for _, repoDir := range repoDirs {
-			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0755)
-			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0755)
-			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0755)
-			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0755)
+			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0o755)
+			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0o755)
+			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0o755)
+			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0o755)
 		}
 	}
 

@@ -21,10 +21,8 @@ import (
 	"xorm.io/builder"
 )
 
-var (
-	// ErrEmailNotActivated e-mail address has not been activated error
-	ErrEmailNotActivated = errors.New("E-mail address has not been activated")
-)
+// ErrEmailNotActivated e-mail address has not been activated error
+var ErrEmailNotActivated = errors.New("E-mail address has not been activated")
 
 // ErrEmailInvalid represents an error where the email address does not comply with RFC 5322
 type ErrEmailInvalid struct {