gotosocial/internal/processing/status/boost.go

// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package status

import (
	"context"
	"errors"
	"fmt"

"github.com/superseriousbusiness/gotosocial/internal/ap"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/util"
)
// BoostCreate processes the boost/reblog of target
// status, returning the newly-created boost.
func (p *Processor) BoostCreate(
	ctx context.Context,
	requester *gtsmodel.Account,
	application *gtsmodel.Application,
	targetID string,
) (*apimodel.Status, gtserror.WithCode) {
	// Get target status and ensure it's visible to the requester.
	target, errWithCode := p.c.GetVisibleTargetStatus(
		ctx,
		requester,
		targetID,
		nil, // default freshness
	)
	if errWithCode != nil {
		return nil, errWithCode
	}

	// Unwrap target in case it is a boost.
	target, errWithCode = p.c.UnwrapIfBoost(
		ctx,
		requester,
		target,
	)
	if errWithCode != nil {
		return nil, errWithCode
	}

	// Check that this is a viable boost target.
	if target.BoostOfID != "" {
		err := gtserror.Newf("target status %s is a boost wrapper", target.URI)
		return nil, gtserror.NewErrorUnprocessableEntity(err)
	}

	// Ensure valid boost target for requester.
	policyResult, err := p.intFilter.StatusBoostable(ctx,
		requester,
		target,
	)
	if err != nil {
		err := gtserror.Newf("error seeing if status %s is boostable: %w", target.ID, err)
		return nil, gtserror.NewErrorInternalError(err)
	}

	if policyResult.Forbidden() {
		const errText = "you do not have permission to boost this status"
		err := gtserror.New(errText)
		return nil, gtserror.NewErrorForbidden(err, errText)
	}

	// Status is visible and boostable
	// (though maybe pending approval).
	boost, err := p.converter.StatusToBoost(ctx,
		target,
		requester,
		application.ID,
	)
	if err != nil {
		return nil, gtserror.NewErrorInternalError(err)
	}

	// Derive pendingApproval status.
	var pendingApproval bool
	switch {
	case policyResult.WithApproval():
		// We're allowed to do
		// this pending approval.
		pendingApproval = true

	case policyResult.MatchedOnCollection():
		// We're permitted to do this, but since
		// we matched due to presence in a followers
		// or following collection, we should mark
		// as pending approval and wait until we can
		// prove it's been Accepted by the target.
		pendingApproval = true

		if *target.Local {
			// If the target is local we don't need
			// to wait for an Accept from remote,
			// we can just preapprove it and have
			// the processor create the Accept.
			boost.PreApproved = true
		}

	case policyResult.Permitted():
		// We're permitted to do this
		// based on another kind of match.
		pendingApproval = false
	}

	boost.PendingApproval = &pendingApproval

	// Store the new boost.
	if err := p.state.DB.PutStatus(ctx, boost); err != nil {
		return nil, gtserror.NewErrorInternalError(err)
	}

	// Process side effects asynchronously.
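	// (The message below is picked up by the client API
	// worker pool, which carries out side effects such as
	// federating the Announce activity out to remote instances.)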
	p.state.Workers.Client.Queue.Push(&messages.FromClientAPI{
		APObjectType:   ap.ActivityAnnounce,
		APActivityType: ap.ActivityCreate,
		GTSModel:       boost,
		Origin:         requester,
		Target:         target.Account,
	})

	// If the boost target status replies to a status
	// that we own, and has a pending interaction
	// request, use the boost as an implicit accept.
	implicitlyAccepted, errWithCode := p.implicitlyAccept(ctx,
		requester, target,
	)
	if errWithCode != nil {
		return nil, errWithCode
	}

	// If we ended up implicitly accepting, mark the
	// target status as no longer pending approval so
	// it's serialized properly via the API.
	if implicitlyAccepted {
		target.PendingApproval = util.Ptr(false)
	}

	return p.c.GetAPIStatus(ctx, requester, boost)
}

// BoostRemove processes the unboost/unreblog of
// target status, returning the target status.
func (p *Processor) BoostRemove(
	ctx context.Context,
	requester *gtsmodel.Account,
	application *gtsmodel.Application,
	targetID string,
) (*apimodel.Status, gtserror.WithCode) {
	// Get target status and ensure it's visible to the requester.
	target, errWithCode := p.c.GetVisibleTargetStatus(
		ctx,
		requester,
		targetID,
		nil, // default freshness
	)
	if errWithCode != nil {
		return nil, errWithCode
	}

	target, errWithCode = p.c.UnwrapIfBoost(
		ctx,
		requester,
		target,
	)
	if errWithCode != nil {
		return nil, errWithCode
	}

	// Check whether requester has actually
	// boosted target, by trying to get the boost.
	boost, err := p.state.DB.GetStatusBoost(ctx,
		target.ID,
		requester.ID,
	)
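	// Only a real database error is fatal here:
	// db.ErrNoEntries just means the requester
	// hasn't boosted the target status.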
	if err != nil && !errors.Is(err, db.ErrNoEntries) {
		err = gtserror.Newf("db error getting boost of %s: %w", target.ID, err)
		return nil, gtserror.NewErrorInternalError(err)
	}

	if boost != nil {
		// Status was boosted. Process unboost side effects asynchronously.
		p.state.Workers.Client.Queue.Push(&messages.FromClientAPI{
			APObjectType:   ap.ActivityAnnounce,
			APActivityType: ap.ActivityUndo,
			GTSModel:       boost,
			Origin:         requester,
			Target:         target.Account,
		})
	}

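	// Whether or not a boost was found and undone,
	// return the target status: unboosting is
	// idempotent from the client's point of view.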
	return p.c.GetAPIStatus(ctx, requester, target)
}

// StatusBoostedBy returns a slice of accounts that have boosted the given status, filtered according to privacy settings.
func (p *Processor) StatusBoostedBy(ctx context.Context, requestingAccount *gtsmodel.Account, targetStatusID string) ([]*apimodel.Account, gtserror.WithCode) {
	targetStatus, err := p.state.DB.GetStatusByID(ctx, targetStatusID)
	if err != nil {
		wrapped := fmt.Errorf("BoostedBy: error fetching status %s: %s", targetStatusID, err)
		if !errors.Is(err, db.ErrNoEntries) {
			return nil, gtserror.NewErrorInternalError(wrapped)
		}
		return nil, gtserror.NewErrorNotFound(wrapped)
	}

if boostOfID := targetStatus.BoostOfID; boostOfID != "" {
// the target status is a boost wrapper, redirect this request to the status it boosts
boostedStatus, err := p.state.DB.GetStatusByID(ctx, boostOfID)
if err != nil {
wrapped := fmt.Errorf("BoostedBy: error fetching status %s: %s", boostOfID, err)
if !errors.Is(err, db.ErrNoEntries) {
return nil, gtserror.NewErrorInternalError(wrapped)
}
return nil, gtserror.NewErrorNotFound(wrapped)
}
targetStatus = boostedStatus
}
	visible, err := p.visFilter.StatusVisible(ctx, requestingAccount, targetStatus)
	if err != nil {
		err = fmt.Errorf("BoostedBy: error seeing if status %s is visible: %s", targetStatus.ID, err)
		return nil, gtserror.NewErrorNotFound(err)
	}

	if !visible {
		err = errors.New("BoostedBy: status is not visible")
		return nil, gtserror.NewErrorNotFound(err)
	}
	statusBoosts, err := p.state.DB.GetStatusBoosts(ctx, targetStatus.ID)
	if err != nil {
		err = fmt.Errorf("BoostedBy: error seeing who boosted status: %s", err)
		return nil, gtserror.NewErrorNotFound(err)
	}

	// Filter account IDs so the user doesn't see accounts
	// they've blocked, or which have blocked them.
	accountIDs := make([]string, 0, len(statusBoosts))
	for _, s := range statusBoosts {
		blocked, err := p.state.DB.IsEitherBlocked(ctx, requestingAccount.ID, s.AccountID)
		if err != nil {
			err = fmt.Errorf("BoostedBy: error checking blocks: %s", err)
			return nil, gtserror.NewErrorNotFound(err)
		}

		if !blocked {
			accountIDs = append(accountIDs, s.AccountID)
		}
	}

	// TODO: filter other things here? suspended? muted? silenced?

	// Fetch accounts + create their API representations.
	apiAccounts := make([]*apimodel.Account, 0, len(accountIDs))
	for _, accountID := range accountIDs {
		account, err := p.state.DB.GetAccountByID(ctx, accountID)
		if err != nil {
			wrapped := fmt.Errorf("BoostedBy: error fetching account %s: %s", accountID, err)
			if !errors.Is(err, db.ErrNoEntries) {
				return nil, gtserror.NewErrorInternalError(wrapped)
			}
			return nil, gtserror.NewErrorNotFound(wrapped)
		}

		apiAccount, err := p.converter.AccountToAPIAccountPublic(ctx, account)
		if err != nil {
			err = fmt.Errorf("BoostedBy: error converting account to api model: %s", err)
			return nil, gtserror.NewErrorInternalError(err)
		}

		apiAccounts = append(apiAccounts, apiAccount)
	}

	return apiAccounts, nil
}