Compare commits
54 Commits
develop
...
operation-
Author | SHA1 | Date |
---|---|---|
Alex Gleason | c735444f38 | |
Alex Gleason | 11f03344d3 | |
Alex Gleason | 9cea98b7e4 | |
Alex Gleason | fed5c2871d | |
Alex Gleason | 454307112c | |
Alex Gleason | c368078cd0 | |
Ivan Tashkinov | a126a89358 | |
Alex Gleason | 75ea7b7eb7 | |
Alex Gleason | 0b67c794b2 | |
Alex Gleason | faadb2f325 | |
Alex Gleason | c04afbbfb4 | |
Alex Gleason | f9ae674a57 | |
Alex Gleason | 26b0cbfd06 | |
Alex Gleason | 1654b7fd61 | |
Alex Gleason | 1fd27efbf4 | |
Alex Gleason | 990f98c044 | |
Alex Gleason | bec555066d | |
Alex Gleason | 909f3bcdfe | |
Alex Gleason | bf85d69ad9 | |
Alex Gleason | cec9c4e0b5 | |
Alex Gleason | c9aa89e239 | |
Alex Gleason | 9b97641862 | |
Alex Gleason | fc844b5ebd | |
Alex Gleason | 668bd5da0a | |
Alex Gleason | 15e6b40899 | |
Alex Gleason | 5d89a0ff83 | |
Alex Gleason | 0af8adb265 | |
Alex Gleason | 99c55c97bf | |
Alex Gleason | 032aa8895f | |
Alex Gleason | aedf47c45f | |
Alex Gleason | 51383aa72b | |
Alex Gleason | a13da48d3d | |
Alex Gleason | b6e87ada07 | |
Alex Gleason | e8ad88fded | |
Alex Gleason | b075de066b | |
Alex Gleason | e1146f5ed6 | |
Alex Gleason | 7f698172b9 | |
Alex Gleason | 26a08ef54d | |
Alex Gleason | 91902c87de | |
Alex Gleason | 1776ea1c86 | |
Alex Gleason | 73cde0aeeb | |
Alex Gleason | 6124aef18d | |
Alex Gleason | 416c3f1bda | |
Alex Gleason | 304e6baf47 | |
Alex Gleason | 5cd40ad9de | |
Alex Gleason | 820904d419 | |
Alex Gleason | 40f14b9daa | |
Alex Gleason | 2e82321cef | |
Alex Gleason | 79c7ba7f5d | |
Alex Gleason | 0f81adf448 | |
Alex Gleason | 0dc132a543 | |
Alex Gleason | 745e9814de | |
Alex Gleason | 2bc3505581 | |
Alex Gleason | dbe0f05cec |
|
@ -5,23 +5,6 @@
|
|||
!!! danger
|
||||
These mix tasks can take a long time to complete. Many of them were written to address specific database issues that happened because of bugs in migrations or other specific scenarios. Do not run these tasks "just in case" if everything is fine your instance.
|
||||
|
||||
## Replace embedded objects with their references
|
||||
|
||||
Replaces embedded objects with references to them in the `objects` table. Only needs to be ran once if the instance was created before Pleroma 1.0.5. The reason why this is not a migration is because it could significantly increase the database size after being ran, however after this `VACUUM FULL` will be able to reclaim about 20% (really depends on what is in the database, your mileage may vary) of the db size before the migration.
|
||||
|
||||
=== "OTP"
|
||||
|
||||
```sh
|
||||
./bin/pleroma_ctl database remove_embedded_objects [option ...]
|
||||
```
|
||||
|
||||
=== "From Source"
|
||||
|
||||
```sh
|
||||
mix pleroma.database remove_embedded_objects [option ...]
|
||||
```
|
||||
|
||||
|
||||
### Options
|
||||
- `--vacuum` - run `VACUUM FULL` after the embedded objects are replaced with their references
|
||||
|
||||
|
@ -144,7 +127,7 @@ but should only be run if necessary. **It is safe to cancel this.**
|
|||
|
||||
## Change Text Search Configuration
|
||||
|
||||
Change `default_text_search_config` for database and (if necessary) text_search_config used in index, then rebuild index (it may take time).
|
||||
Change `default_text_search_config` for database and (if necessary) text_search_config used in index, then rebuild index (it may take time).
|
||||
|
||||
=== "OTP"
|
||||
|
||||
|
|
|
@ -20,29 +20,6 @@ defmodule Mix.Tasks.Pleroma.Database do
|
|||
@shortdoc "A collection of database related tasks"
|
||||
@moduledoc File.read!("docs/administration/CLI_tasks/database.md")
|
||||
|
||||
def run(["remove_embedded_objects" | args]) do
|
||||
{options, [], []} =
|
||||
OptionParser.parse(
|
||||
args,
|
||||
strict: [
|
||||
vacuum: :boolean
|
||||
]
|
||||
)
|
||||
|
||||
start_pleroma()
|
||||
Logger.info("Removing embedded objects")
|
||||
|
||||
Repo.query!(
|
||||
"update activities set data = safe_jsonb_set(data, '{object}'::text[], data->'object'->'id') where data->'object'->>'id' is not null;",
|
||||
[],
|
||||
timeout: :infinity
|
||||
)
|
||||
|
||||
if Keyword.get(options, :vacuum) do
|
||||
Maintenance.vacuum("full")
|
||||
end
|
||||
end
|
||||
|
||||
def run(["bump_all_conversations"]) do
|
||||
start_pleroma()
|
||||
Conversation.bump_for_all_activities()
|
||||
|
|
|
@ -26,7 +26,7 @@ defmodule Pleroma.Activity do
|
|||
|
||||
@cachex Pleroma.Config.get([:cachex, :provider], Cachex)
|
||||
|
||||
schema "activities" do
|
||||
schema "objects" do
|
||||
field(:data, :map)
|
||||
field(:local, :boolean, default: true)
|
||||
field(:actor, :string)
|
||||
|
|
|
@ -20,7 +20,7 @@ defmodule Pleroma.Chat.MessageReference do
|
|||
@primary_key {:id, FlakeId.Ecto.Type, autogenerate: true}
|
||||
|
||||
schema "chat_message_references" do
|
||||
belongs_to(:object, Object)
|
||||
belongs_to(:object, Object, type: FlakeId.Ecto.CompatType)
|
||||
belongs_to(:chat, Chat, type: FlakeId.Ecto.CompatType)
|
||||
|
||||
field(:unread, :boolean, default: true)
|
||||
|
|
|
@ -0,0 +1,13 @@
|
|||
# Pleroma: A lightweight social networking server
|
||||
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
|
||||
# SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
defmodule Pleroma.DataMigrationFailedId do
|
||||
use Ecto.Schema
|
||||
alias Pleroma.DataMigration
|
||||
|
||||
schema "data_migration_failed_ids" do
|
||||
belongs_to(:data_migration, DataMigration)
|
||||
field(:record_id, FlakeId.Ecto.CompatType)
|
||||
end
|
||||
end
|
|
@ -15,7 +15,7 @@ defmodule Pleroma.Delivery do
|
|||
|
||||
schema "deliveries" do
|
||||
belongs_to(:user, User, type: FlakeId.Ecto.CompatType)
|
||||
belongs_to(:object, Object)
|
||||
belongs_to(:object, Object, type: FlakeId.Ecto.CompatType)
|
||||
end
|
||||
|
||||
def changeset(delivery, params \\ %{}) do
|
||||
|
|
|
@ -10,13 +10,14 @@ defmodule Pleroma.Hashtag do
|
|||
|
||||
alias Ecto.Multi
|
||||
alias Pleroma.Hashtag
|
||||
alias Pleroma.HashtagObject
|
||||
alias Pleroma.Object
|
||||
alias Pleroma.Repo
|
||||
|
||||
schema "hashtags" do
|
||||
field(:name, :string)
|
||||
|
||||
many_to_many(:objects, Object, join_through: "hashtags_objects", on_replace: :delete)
|
||||
many_to_many(:objects, Object, join_through: HashtagObject, on_replace: :delete)
|
||||
|
||||
timestamps()
|
||||
end
|
||||
|
@ -80,7 +81,7 @@ def changeset(%Hashtag{} = struct, params) do
|
|||
|
||||
def unlink(%Object{id: object_id}) do
|
||||
with {_, hashtag_ids} <-
|
||||
from(hto in "hashtags_objects",
|
||||
from(hto in HashtagObject,
|
||||
where: hto.object_id == ^object_id,
|
||||
select: hto.hashtag_id
|
||||
)
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
defmodule Pleroma.HashtagObject do
|
||||
@moduledoc """
|
||||
Through table relationship between hashtags and objects.
|
||||
https://hexdocs.pm/ecto/polymorphic-associations-with-many-to-many.html
|
||||
"""
|
||||
use Ecto.Schema
|
||||
|
||||
alias Pleroma.Hashtag
|
||||
alias Pleroma.Object
|
||||
|
||||
@primary_key false
|
||||
|
||||
schema "hashtags_objects" do
|
||||
belongs_to(:hashtag, Hashtag)
|
||||
belongs_to(:object, Object, type: FlakeId.Ecto.CompatType)
|
||||
end
|
||||
end
|
|
@ -0,0 +1,103 @@
|
|||
# Pleroma: A lightweight social networking server
|
||||
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
|
||||
# SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
defmodule Pleroma.MigrationHelper.LegacyActivity do
|
||||
@moduledoc """
|
||||
Legacy "activities" schema needed for old migrations.
|
||||
"""
|
||||
use Ecto.Schema
|
||||
|
||||
alias Pleroma.Activity.Queries
|
||||
alias Pleroma.Bookmark
|
||||
alias Pleroma.MigrationHelper.LegacyActivity, as: Activity
|
||||
alias Pleroma.Notification
|
||||
alias Pleroma.Object
|
||||
alias Pleroma.Repo
|
||||
alias Pleroma.ReportNote
|
||||
alias Pleroma.User
|
||||
|
||||
import Ecto.Query
|
||||
|
||||
@type t :: %__MODULE__{}
|
||||
@type actor :: String.t()
|
||||
|
||||
@primary_key {:id, FlakeId.Ecto.CompatType, autogenerate: true}
|
||||
|
||||
schema "activities" do
|
||||
field(:data, :map)
|
||||
field(:local, :boolean, default: true)
|
||||
field(:actor, :string)
|
||||
field(:recipients, {:array, :string}, default: [])
|
||||
field(:thread_muted?, :boolean, virtual: true)
|
||||
|
||||
# A field that can be used if you need to join some kind of other
|
||||
# id to order / paginate this field by
|
||||
field(:pagination_id, :string, virtual: true)
|
||||
|
||||
# This is a fake relation,
|
||||
# do not use outside of with_preloaded_user_actor/with_joined_user_actor
|
||||
has_one(:user_actor, User, on_delete: :nothing, foreign_key: :id)
|
||||
# This is a fake relation, do not use outside of with_preloaded_bookmark/get_bookmark
|
||||
has_one(:bookmark, Bookmark, foreign_key: :activity_id)
|
||||
# This is a fake relation, do not use outside of with_preloaded_report_notes
|
||||
has_many(:report_notes, ReportNote, foreign_key: :activity_id)
|
||||
has_many(:notifications, Notification, on_delete: :delete_all, foreign_key: :activity_id)
|
||||
|
||||
# Attention: this is a fake relation, don't try to preload it blindly and expect it to work!
|
||||
# The foreign key is embedded in a jsonb field.
|
||||
#
|
||||
# To use it, you probably want to do an inner join and a preload:
|
||||
#
|
||||
# ```
|
||||
# |> join(:inner, [activity], o in Object,
|
||||
# on: fragment("(?->>'id') = COALESCE((?)->'object'->> 'id', (?)->>'object')",
|
||||
# o.data, activity.data, activity.data))
|
||||
# |> preload([activity, object], [object: object])
|
||||
# ```
|
||||
#
|
||||
# As a convenience, Activity.with_preloaded_object() sets up an inner join and preload for the
|
||||
# typical case.
|
||||
has_one(:object, Object, on_delete: :nothing, foreign_key: :id)
|
||||
|
||||
timestamps()
|
||||
end
|
||||
|
||||
def with_joined_object(query, join_type \\ :inner) do
|
||||
join(query, join_type, [activity], o in Object,
|
||||
on:
|
||||
fragment(
|
||||
"(?->>'id') = COALESCE(?->'object'->>'id', ?->>'object')",
|
||||
o.data,
|
||||
activity.data,
|
||||
activity.data
|
||||
),
|
||||
as: :object
|
||||
)
|
||||
end
|
||||
|
||||
def with_preloaded_object(query, join_type \\ :inner) do
|
||||
query
|
||||
|> has_named_binding?(:object)
|
||||
|> if(do: query, else: with_joined_object(query, join_type))
|
||||
|> preload([activity, object: object], object: object)
|
||||
end
|
||||
|
||||
def all_by_ids_with_object(ids) do
|
||||
Activity
|
||||
|> where([a], a.id in ^ids)
|
||||
|> with_preloaded_object()
|
||||
|> Repo.all()
|
||||
end
|
||||
|
||||
@doc """
|
||||
Accepts `ap_id` or list of `ap_id`.
|
||||
Returns a query.
|
||||
"""
|
||||
@spec create_by_object_ap_id(String.t() | [String.t()]) :: Ecto.Queryable.t()
|
||||
def create_by_object_ap_id(ap_id) do
|
||||
Activity
|
||||
|> Queries.by_object_id(ap_id)
|
||||
|> Queries.by_type("Create")
|
||||
end
|
||||
end
|
|
@ -0,0 +1,61 @@
|
|||
# Pleroma: A lightweight social networking server
|
||||
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
|
||||
# SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
defmodule Pleroma.MigrationHelper.ObjectId do
|
||||
@moduledoc """
|
||||
Functions for migrating Object IDs.
|
||||
"""
|
||||
alias Pleroma.Chat.MessageReference
|
||||
alias Pleroma.DataMigrationFailedId
|
||||
alias Pleroma.Delivery
|
||||
alias Pleroma.HashtagObject
|
||||
alias Pleroma.Object
|
||||
alias Pleroma.Repo
|
||||
|
||||
import Ecto.Changeset
|
||||
import Ecto.Query
|
||||
|
||||
@doc "Change an object's ID including all references."
|
||||
def change_id(%Object{id: old_id} = object, new_id) do
|
||||
Repo.transaction(fn ->
|
||||
update_object_fk(MessageReference, old_id, new_id)
|
||||
update_object_fk(Delivery, old_id, new_id)
|
||||
update_object_fk(HashtagObject, old_id, new_id)
|
||||
update_object_fk(DataMigrationFailedId, old_id, new_id, :record_id)
|
||||
|
||||
Repo.update!(change(object, id: new_id))
|
||||
end)
|
||||
end
|
||||
|
||||
defp update_object_fk(schema, old_id, new_id, field \\ :object_id) do
|
||||
binding = [{field, old_id}]
|
||||
|
||||
schema
|
||||
|> where(^binding)
|
||||
|> Repo.update_all(set: [{field, new_id}])
|
||||
end
|
||||
|
||||
@doc "Generate a FlakeId from a datetime."
|
||||
@spec flake_from_time(NaiveDateTime.t()) :: flake_id :: String.t()
|
||||
def flake_from_time(%NaiveDateTime{} = dt) do
|
||||
dt
|
||||
|> build_worker()
|
||||
|> FlakeId.Worker.gen_flake()
|
||||
|> FlakeId.to_string()
|
||||
end
|
||||
|
||||
# Build a one-off FlakeId worker.
|
||||
defp build_worker(%NaiveDateTime{} = dt) do
|
||||
%FlakeId.Worker{
|
||||
node: FlakeId.Worker.worker_id(),
|
||||
time: get_timestamp(dt, :millisecond)
|
||||
}
|
||||
end
|
||||
|
||||
# Convert a NaiveDateTime into a Unix timestamp.
|
||||
@epoch ~N[1970-01-01 00:00:00]
|
||||
defp get_timestamp(%NaiveDateTime{} = dt, unit) do
|
||||
NaiveDateTime.diff(dt, @epoch, unit)
|
||||
end
|
||||
end
|
|
@ -12,10 +12,14 @@ defmodule State do
|
|||
|
||||
use Pleroma.Migrators.Support.BaseMigrator
|
||||
|
||||
alias Pleroma.DataMigrationFailedId
|
||||
alias Pleroma.Hashtag
|
||||
alias Pleroma.HashtagObject
|
||||
alias Pleroma.Migrators.Support.BaseMigrator
|
||||
alias Pleroma.Object
|
||||
|
||||
import Ecto.Query
|
||||
|
||||
@impl BaseMigrator
|
||||
def feature_config_path, do: [:features, :improved_hashtag_timeline]
|
||||
|
||||
|
@ -50,19 +54,20 @@ def perform do
|
|||
|
||||
for failed_id <- failed_ids do
|
||||
_ =
|
||||
Repo.query(
|
||||
"INSERT INTO data_migration_failed_ids(data_migration_id, record_id) " <>
|
||||
"VALUES ($1, $2) ON CONFLICT DO NOTHING;",
|
||||
[data_migration_id, failed_id]
|
||||
)
|
||||
%DataMigrationFailedId{
|
||||
data_migration_id: data_migration_id,
|
||||
record_id: failed_id
|
||||
}
|
||||
|> Repo.insert(on_conflict: :nothing)
|
||||
end
|
||||
|
||||
record_ids = object_ids -- failed_ids
|
||||
|
||||
_ =
|
||||
Repo.query(
|
||||
"DELETE FROM data_migration_failed_ids " <>
|
||||
"WHERE data_migration_id = $1 AND record_id = ANY($2)",
|
||||
[data_migration_id, object_ids -- failed_ids]
|
||||
)
|
||||
DataMigrationFailedId
|
||||
|> where(data_migration_id: ^data_migration_id)
|
||||
|> where([dmf], dmf.record_id in ^record_ids)
|
||||
|> Repo.delete_all()
|
||||
|
||||
max_object_id = Enum.at(object_ids, -1)
|
||||
|
||||
|
@ -120,7 +125,7 @@ defp transfer_object_hashtags(object, hashtags) do
|
|||
|
||||
try do
|
||||
with {rows_count, _} when is_integer(rows_count) <-
|
||||
Repo.insert_all("hashtags_objects", maps, on_conflict: :nothing) do
|
||||
Repo.insert_all(HashtagObject, maps, on_conflict: :nothing) do
|
||||
object.id
|
||||
else
|
||||
e ->
|
||||
|
@ -147,14 +152,13 @@ def retry_failed do
|
|||
|
||||
failed_objects_query()
|
||||
|> Repo.chunk_stream(100, :one)
|
||||
|> Stream.each(fn object ->
|
||||
|> Stream.each(fn %{id: object_id} = object ->
|
||||
with {res, _} when res != :error <- transfer_object_hashtags(object) do
|
||||
_ =
|
||||
Repo.query(
|
||||
"DELETE FROM data_migration_failed_ids " <>
|
||||
"WHERE data_migration_id = $1 AND record_id = $2",
|
||||
[data_migration_id, object.id]
|
||||
)
|
||||
DataMigrationFailedId
|
||||
|> where(data_migration_id: ^data_migration_id)
|
||||
|> where(record_id: ^object_id)
|
||||
|> Repo.delete_all()
|
||||
end
|
||||
end)
|
||||
|> Stream.run()
|
||||
|
@ -167,9 +171,7 @@ def retry_failed do
|
|||
|
||||
defp failed_objects_query do
|
||||
from(o in Object)
|
||||
|> join(:inner, [o], dmf in fragment("SELECT * FROM data_migration_failed_ids"),
|
||||
on: dmf.record_id == o.id
|
||||
)
|
||||
|> join(:inner, [o], dmf in DataMigrationFailedId, on: dmf.record_id == o.id)
|
||||
|> where([_o, dmf], dmf.data_migration_id == ^data_migration_id())
|
||||
|> order_by([o], asc: o.id)
|
||||
end
|
||||
|
|
|
@ -138,7 +138,7 @@ defp exclude_blocked(query, user, opts) do
|
|||
blocked_ap_ids = opts[:blocked_users_ap_ids] || User.blocked_users_ap_ids(user)
|
||||
|
||||
query
|
||||
|> where([n, a], a.actor not in ^blocked_ap_ids)
|
||||
|> where([n, a], fragment("not (? && ?)", [a.actor], ^blocked_ap_ids))
|
||||
|> FollowingRelationship.keep_following_or_not_domain_blocked(user)
|
||||
end
|
||||
|
||||
|
@ -149,7 +149,7 @@ defp exclude_blockers(query, user) do
|
|||
blocker_ap_ids = User.incoming_relationships_ungrouped_ap_ids(user, [:block])
|
||||
|
||||
query
|
||||
|> where([n, a], a.actor not in ^blocker_ap_ids)
|
||||
|> where([n, a], fragment("not (? && ?)", [a.actor], ^blocker_ap_ids))
|
||||
end
|
||||
end
|
||||
|
||||
|
|
|
@ -11,6 +11,7 @@ defmodule Pleroma.Object do
|
|||
alias Pleroma.Activity
|
||||
alias Pleroma.Config
|
||||
alias Pleroma.Hashtag
|
||||
alias Pleroma.HashtagObject
|
||||
alias Pleroma.Object
|
||||
alias Pleroma.Object.Fetcher
|
||||
alias Pleroma.ObjectTombstone
|
||||
|
@ -22,6 +23,8 @@ defmodule Pleroma.Object do
|
|||
|
||||
@type t() :: %__MODULE__{}
|
||||
|
||||
@primary_key {:id, FlakeId.Ecto.CompatType, autogenerate: true}
|
||||
|
||||
@derive {Jason.Encoder, only: [:data]}
|
||||
|
||||
@cachex Pleroma.Config.get([:cachex, :provider], Cachex)
|
||||
|
@ -29,7 +32,7 @@ defmodule Pleroma.Object do
|
|||
schema "objects" do
|
||||
field(:data, :map)
|
||||
|
||||
many_to_many(:hashtags, Hashtag, join_through: "hashtags_objects", on_replace: :delete)
|
||||
many_to_many(:hashtags, Hashtag, join_through: HashtagObject, on_replace: :delete)
|
||||
|
||||
timestamps()
|
||||
end
|
||||
|
|
|
@ -11,6 +11,7 @@ defmodule Pleroma.Web.ActivityPub.ActivityPub do
|
|||
alias Pleroma.Conversation.Participation
|
||||
alias Pleroma.Filter
|
||||
alias Pleroma.Hashtag
|
||||
alias Pleroma.HashtagObject
|
||||
alias Pleroma.Maps
|
||||
alias Pleroma.Notification
|
||||
alias Pleroma.Object
|
||||
|
@ -96,14 +97,6 @@ defp increase_replies_count_if_reply(%{
|
|||
|
||||
defp increase_replies_count_if_reply(_create_data), do: :noop
|
||||
|
||||
@object_types ~w[ChatMessage Question Answer Audio Video Event Article Note Page]
|
||||
@impl true
|
||||
def persist(%{"type" => type} = object, meta) when type in @object_types do
|
||||
with {:ok, object} <- Object.create(object) do
|
||||
{:ok, object, meta}
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def persist(object, meta) do
|
||||
with local <- Keyword.fetch!(meta, :local),
|
||||
|
@ -780,8 +773,8 @@ defp restrict_embedded_tag_reject_any(query, %{tag_reject: tag_reject})
|
|||
defp restrict_embedded_tag_reject_any(query, _), do: query
|
||||
|
||||
defp object_ids_query_for_tags(tags) do
|
||||
from(hto in "hashtags_objects")
|
||||
|> join(:inner, [hto], ht in Pleroma.Hashtag, on: hto.hashtag_id == ht.id)
|
||||
from(hto in HashtagObject)
|
||||
|> join(:inner, [hto], ht in Hashtag, on: hto.hashtag_id == ht.id)
|
||||
|> where([hto, ht], ht.name in ^tags)
|
||||
|> select([hto], hto.object_id)
|
||||
|> distinct([hto], true)
|
||||
|
@ -830,7 +823,7 @@ defp restrict_hashtag_any(query, %{tag: [_ | _] = tags}) do
|
|||
# Note: NO extra ordering should be done on "activities.id desc nulls last" for optimal plan
|
||||
from(
|
||||
[_activity, object] in query,
|
||||
join: hto in "hashtags_objects",
|
||||
join: hto in HashtagObject,
|
||||
on: hto.object_id == object.id,
|
||||
where: hto.hashtag_id in ^hashtag_ids,
|
||||
distinct: [desc: object.id],
|
||||
|
@ -1035,7 +1028,7 @@ defp restrict_blocked(query, %{blocking_user: %User{} = user} = opts) do
|
|||
from(
|
||||
[activity, object: o] in query,
|
||||
# You don't block the author
|
||||
where: fragment("not (? = ANY(?))", activity.actor, ^blocked_ap_ids),
|
||||
where: fragment("not (? && ?)", [activity.actor], ^blocked_ap_ids),
|
||||
|
||||
# You don't block any recipients, and didn't author the post
|
||||
where:
|
||||
|
@ -1099,7 +1092,7 @@ defp restrict_blockers_visibility(query, %{blocking_user: %User{} = user}) do
|
|||
from(
|
||||
activity in query,
|
||||
# The author doesn't block you
|
||||
where: fragment("not (? = ANY(?))", activity.actor, ^blocker_ap_ids),
|
||||
where: fragment("not (? && ?)", [activity.actor], ^blocker_ap_ids),
|
||||
|
||||
# It's not a boost of a user that blocks you
|
||||
where:
|
||||
|
@ -1165,7 +1158,7 @@ defp restrict_muted_reblogs(query, _), do: query
|
|||
defp restrict_instance(query, %{instance: instance}) when is_binary(instance) do
|
||||
from(
|
||||
activity in query,
|
||||
where: fragment("split_part(actor::text, '/'::text, 3) = ?", ^instance)
|
||||
where: fragment("split_part(?::text, '/'::text, 3) = ?", activity.actor, ^instance)
|
||||
)
|
||||
end
|
||||
|
||||
|
|
|
@ -52,7 +52,7 @@ defmacro status_object_fields do
|
|||
|
||||
field(:context, :string)
|
||||
# short identifier for PleromaFE to group statuses by context
|
||||
field(:context_id, :integer)
|
||||
field(:context_id, :string)
|
||||
|
||||
field(:sensitive, :boolean, default: false)
|
||||
field(:replies_count, :integer, default: 0)
|
||||
|
|
|
@ -712,9 +712,10 @@ defp build_flag_object(%{account: account, statuses: statuses}) do
|
|||
|
||||
defp build_flag_object(%{statuses: statuses}) do
|
||||
Enum.map(statuses || [], &build_flag_object/1)
|
||||
|> Enum.reject(&is_nil/1)
|
||||
end
|
||||
|
||||
defp build_flag_object(%Activity{data: %{"id" => id}, object: %{data: data}}) do
|
||||
defp build_flag_object(%Activity{data: %{"id" => id, "type" => "Create"}, object: %{data: data}}) do
|
||||
activity_actor = User.get_by_ap_id(data["actor"])
|
||||
|
||||
%{
|
||||
|
@ -730,28 +731,26 @@ defp build_flag_object(%Activity{data: %{"id" => id}, object: %{data: data}}) do
|
|||
}
|
||||
end
|
||||
|
||||
defp build_flag_object(act) when is_map(act) or is_binary(act) do
|
||||
id =
|
||||
case act do
|
||||
%Activity{} = act -> act.data["id"]
|
||||
act when is_map(act) -> act["id"]
|
||||
act when is_binary(act) -> act
|
||||
end
|
||||
defp build_flag_object(%{data: %{"id" => id}}), do: build_flag_object(id)
|
||||
defp build_flag_object(%{"id" => id}), do: build_flag_object(id)
|
||||
|
||||
case Activity.get_by_ap_id_with_object(id) do
|
||||
%Activity{} = activity ->
|
||||
defp build_flag_object(ap_id) when is_binary(ap_id) do
|
||||
case Activity.get_by_ap_id_with_object(ap_id) do
|
||||
%Activity{data: %{"type" => "Create"}} = activity ->
|
||||
build_flag_object(activity)
|
||||
|
||||
nil ->
|
||||
if activity = Activity.get_by_object_ap_id_with_object(id) do
|
||||
build_flag_object(activity)
|
||||
else
|
||||
%{"id" => id, "deleted" => true}
|
||||
_ ->
|
||||
case Activity.get_by_object_ap_id_with_object(ap_id) do
|
||||
%Activity{data: %{"type" => "Create"}} = activity ->
|
||||
build_flag_object(activity)
|
||||
|
||||
_ ->
|
||||
%{"id" => ap_id, "deleted" => true}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
defp build_flag_object(_), do: []
|
||||
defp build_flag_object(_), do: nil
|
||||
|
||||
#### Report-related helpers
|
||||
def get_reports(params, page, page_size) do
|
||||
|
|
|
@ -143,7 +143,7 @@ defmodule Pleroma.Web.ApiSpec.Schemas.Status do
|
|||
"A map consisting of alternate representations of the `content` property with the key being it's mimetype. Currently the only alternate representation supported is `text/plain`"
|
||||
},
|
||||
conversation_id: %Schema{
|
||||
type: :integer,
|
||||
type: :string,
|
||||
description: "The ID of the AP context the status is associated with (if any)"
|
||||
},
|
||||
direct_conversation_id: %Schema{
|
||||
|
@ -319,7 +319,7 @@ defmodule Pleroma.Web.ApiSpec.Schemas.Status do
|
|||
"pinned" => false,
|
||||
"pleroma" => %{
|
||||
"content" => %{"text/plain" => "foobar"},
|
||||
"conversation_id" => 345_972,
|
||||
"conversation_id" => "AEXFhY7X4zd8hZK8oK",
|
||||
"direct_conversation_id" => nil,
|
||||
"emoji_reactions" => [],
|
||||
"expires_at" => nil,
|
||||
|
|
|
@ -57,11 +57,19 @@ defp get_replied_to_activities(activities) do
|
|||
end)
|
||||
end
|
||||
|
||||
defp get_context_id(%{data: %{"context_id" => context_id}}) when not is_nil(context_id),
|
||||
defp get_context_id(%{data: %{"context_id" => context_id}}) when is_binary(context_id),
|
||||
do: context_id
|
||||
|
||||
defp get_context_id(%{data: %{"context" => context}}) when is_binary(context),
|
||||
do: Utils.context_to_conversation_id(context)
|
||||
defp get_context_id(%{data: %{"context_id" => context_id}}) when is_integer(context_id),
|
||||
do: to_string(context_id)
|
||||
|
||||
defp get_context_id(%{data: %{"context" => context}}) when is_binary(context) do
|
||||
case Utils.context_to_conversation_id(context) do
|
||||
id when is_binary(id) -> id
|
||||
id when is_integer(id) -> to_string(id)
|
||||
_ -> nil
|
||||
end
|
||||
end
|
||||
|
||||
defp get_context_id(_), do: nil
|
||||
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
defmodule Pleroma.Repo.Migrations.MigrateOldBookmarks do
|
||||
use Ecto.Migration
|
||||
import Ecto.Query
|
||||
alias Pleroma.Activity
|
||||
alias Pleroma.Bookmark
|
||||
alias Pleroma.MigrationHelper.LegacyActivity
|
||||
alias Pleroma.Repo
|
||||
|
||||
def up do
|
||||
|
@ -18,7 +18,7 @@ def up do
|
|||
Enum.each(bookmarks, fn ap_id ->
|
||||
activity =
|
||||
ap_id
|
||||
|> Activity.create_by_object_ap_id()
|
||||
|> LegacyActivity.create_by_object_ap_id()
|
||||
|> Repo.one()
|
||||
|
||||
unless is_nil(activity), do: {:ok, _} = Bookmark.create(user_id, activity.id)
|
||||
|
|
|
@ -6,7 +6,7 @@ defmodule Pleroma.Repo.Migrations.DeleteNotificationsFromInvisibleUsers do
|
|||
|
||||
def up do
|
||||
Pleroma.Notification
|
||||
|> join(:inner, [n], activity in assoc(n, :activity))
|
||||
|> join(:inner, [n], activity in "activities")
|
||||
|> where(
|
||||
[n, a],
|
||||
fragment("? in (SELECT ap_id FROM users WHERE invisible = true)", a.actor)
|
||||
|
|
|
@ -7,7 +7,7 @@ defmodule Pleroma.Repo.Migrations.DeleteNotificationWithoutActivity do
|
|||
def up do
|
||||
from(
|
||||
q in Pleroma.Notification,
|
||||
left_join: c in assoc(q, :activity),
|
||||
left_join: c in "activities",
|
||||
select: %{id: type(q.id, :integer)},
|
||||
where: is_nil(c.id)
|
||||
)
|
||||
|
|
|
@ -3,6 +3,7 @@ defmodule Pleroma.Repo.Migrations.MovePinnedActivitiesIntoPinnedObjects do
|
|||
|
||||
import Ecto.Query
|
||||
|
||||
alias Pleroma.MigrationHelper.LegacyActivity
|
||||
alias Pleroma.Repo
|
||||
alias Pleroma.User
|
||||
|
||||
|
@ -11,7 +12,7 @@ def up do
|
|||
|> select([u], {u.id, fragment("?.pinned_activities", u)})
|
||||
|> Repo.stream()
|
||||
|> Stream.each(fn {user_id, pinned_activities_ids} ->
|
||||
pinned_activities = Pleroma.Activity.all_by_ids_with_object(pinned_activities_ids)
|
||||
pinned_activities = LegacyActivity.all_by_ids_with_object(pinned_activities_ids)
|
||||
|
||||
pins =
|
||||
Map.new(pinned_activities, fn %{object: %{data: %{"id" => object_id}}} ->
|
||||
|
|
|
@ -0,0 +1,160 @@
|
|||
defmodule Pleroma.Repo.Migrations.ChangeObjectIdToFlake do
|
||||
@moduledoc """
|
||||
Convert object IDs to FlakeIds.
|
||||
Fortunately only a few tables have a foreign key to objects. Update them.
|
||||
"""
|
||||
use Ecto.Migration
|
||||
require Integer
|
||||
|
||||
alias Pleroma.Clippy
|
||||
alias Pleroma.Repo
|
||||
|
||||
import Ecto.Query
|
||||
|
||||
@delete_duplicate_ap_id_objects_query """
|
||||
DELETE FROM objects
|
||||
WHERE id IN (
|
||||
SELECT
|
||||
id
|
||||
FROM (
|
||||
SELECT
|
||||
id,
|
||||
row_number() OVER w as rnum
|
||||
FROM objects
|
||||
WHERE data->>'id' IS NOT NULL
|
||||
WINDOW w AS (
|
||||
PARTITION BY data->>'id'
|
||||
ORDER BY id
|
||||
)
|
||||
) t
|
||||
WHERE t.rnum > 1)
|
||||
"""
|
||||
|
||||
@convert_objects_int_ids_to_flake_ids_query """
|
||||
alter table objects
|
||||
drop constraint objects_pkey cascade,
|
||||
alter column id drop default,
|
||||
alter column id set data type uuid using cast( lpad( to_hex(id), 32, '0') as uuid),
|
||||
add primary key (id)
|
||||
"""
|
||||
|
||||
def up do
|
||||
clippy = start_clippy_heartbeats()
|
||||
|
||||
# Lock tables to avoid a running server meddling with our transaction
|
||||
execute("LOCK TABLE objects")
|
||||
execute("LOCK TABLE data_migration_failed_ids")
|
||||
execute("LOCK TABLE chat_message_references")
|
||||
execute("LOCK TABLE deliveries")
|
||||
execute("LOCK TABLE hashtags_objects")
|
||||
|
||||
# Switch object IDs to FlakeIds
|
||||
execute(fn ->
|
||||
try do
|
||||
repo().query!(@convert_objects_int_ids_to_flake_ids_query)
|
||||
rescue
|
||||
e in Postgrex.Error ->
|
||||
# Handling of error 23505, "unique_violation": https://git.pleroma.social/pleroma/pleroma/-/issues/2771
|
||||
with %{postgres: %{pg_code: "23505"}} <- e do
|
||||
repo().query!(@delete_duplicate_ap_id_objects_query)
|
||||
repo().query!(@convert_objects_int_ids_to_flake_ids_query)
|
||||
else
|
||||
_ -> raise e
|
||||
end
|
||||
end
|
||||
end)
|
||||
|
||||
# Update data_migration_failed_ids
|
||||
execute("""
|
||||
alter table data_migration_failed_ids
|
||||
drop constraint data_migration_failed_ids_pkey cascade,
|
||||
alter column record_id set data type uuid using cast( lpad( to_hex(record_id), 32, '0') as uuid),
|
||||
add primary key (data_migration_id, record_id)
|
||||
""")
|
||||
|
||||
# Update chat message foreign key
|
||||
execute("""
|
||||
alter table chat_message_references
|
||||
alter column object_id set data type uuid using cast( lpad( to_hex(object_id), 32, '0') as uuid),
|
||||
add constraint chat_message_references_object_id_fkey foreign key (object_id) references objects(id) on delete cascade
|
||||
""")
|
||||
|
||||
# Update delivery foreign key
|
||||
execute("""
|
||||
alter table deliveries
|
||||
alter column object_id set data type uuid using cast( lpad( to_hex(object_id), 32, '0') as uuid),
|
||||
add constraint deliveries_object_id_fkey foreign key (object_id) references objects(id) on delete cascade
|
||||
""")
|
||||
|
||||
# Update hashtag many-to-many foreign key
|
||||
execute("""
|
||||
alter table hashtags_objects
|
||||
alter column object_id set data type uuid using cast( lpad( to_hex(object_id), 32, '0') as uuid),
|
||||
add constraint hashtags_objects_object_id_fkey foreign key (object_id) references objects(id) on delete cascade
|
||||
""")
|
||||
|
||||
flush()
|
||||
|
||||
stop_clippy_heartbeats(clippy)
|
||||
end
|
||||
|
||||
def down do
|
||||
raise "This migration can't be reversed"
|
||||
end
|
||||
|
||||
defp start_clippy_heartbeats() do
|
||||
count = from(o in "objects", select: count(o.id)) |> Repo.one!()
|
||||
|
||||
if count > 5000 do
|
||||
heartbeat_interval = :timer.minutes(2) + :timer.seconds(30)
|
||||
|
||||
all_tips =
|
||||
Clippy.tips() ++
|
||||
[
|
||||
"The migration is still running, maybe it's time for another “tea”?",
|
||||
"Happy rabbits practice a cute behavior known as a\n“binky:” they jump up in the air\nand twist\nand spin around!",
|
||||
"Nothing and everything.\n\nI still work.",
|
||||
"Pleroma runs on a Raspberry Pi!\n\n … but this migration will take forever if you\nactually run on a raspberry pi",
|
||||
"Status? Stati? Post? Note? Toot?\nRepeat? Reboost? Boost? Retweet? Retoot??\n\nI-I'm confused."
|
||||
]
|
||||
|
||||
heartbeat = fn heartbeat, runs, all_tips, tips ->
|
||||
tips =
|
||||
if Integer.is_even(runs) do
|
||||
tips = if tips == [], do: all_tips, else: tips
|
||||
[tip | tips] = Enum.shuffle(tips)
|
||||
Clippy.puts(tip)
|
||||
tips
|
||||
else
|
||||
IO.puts(
|
||||
"\n -- #{DateTime.to_string(DateTime.utc_now())} Migration still running, please wait…\n"
|
||||
)
|
||||
|
||||
tips
|
||||
end
|
||||
|
||||
:timer.sleep(heartbeat_interval)
|
||||
heartbeat.(heartbeat, runs + 1, all_tips, tips)
|
||||
end
|
||||
|
||||
Clippy.puts([
|
||||
[:red, :bright, "It looks like you are running an older instance!"],
|
||||
[""],
|
||||
[:bright, "This migration may take a long time", :reset, " -- so you probably should"],
|
||||
["go drink a cofe, or a tea, or a beer, a whiskey, a vodka,"],
|
||||
["while it runs to deal with your temporary fediverse pause!"]
|
||||
])
|
||||
|
||||
:timer.sleep(heartbeat_interval)
|
||||
spawn_link(fn -> heartbeat.(heartbeat, 1, all_tips, []) end)
|
||||
end
|
||||
end
|
||||
|
||||
defp stop_clippy_heartbeats(pid) do
|
||||
if pid do
|
||||
Process.unlink(pid)
|
||||
Process.exit(pid, :kill)
|
||||
Clippy.puts([[:green, :bright, "Hurray!!", "", "", "Migration completed!"]])
|
||||
end
|
||||
end
|
||||
end
|
|
@ -0,0 +1,49 @@
|
|||
defmodule Pleroma.Repo.Migrations.ResolveActivityObjectConflicts do
  @moduledoc """
  Find objects with a conflicting activity ID, and update them.

  This should only happen on servers that existed before "20181218172826_users_and_activities_flake_id".
  """
  use Ecto.Migration

  alias Pleroma.Object
  alias Pleroma.MigrationHelper.ObjectId
  alias Pleroma.Repo

  import Ecto.Query

  def up do
    # Lock relevant tables -- presumably to keep a live server from creating
    # new rows referencing the IDs we are about to rewrite (TODO confirm).
    execute("LOCK TABLE objects")
    execute("LOCK TABLE chat_message_references")
    execute("LOCK TABLE deliveries")
    execute("LOCK TABLE hashtags_objects")

    # Temporarily disable triggers (and by consequence, fkey constraints)
    # https://stackoverflow.com/a/18709987
    Repo.query!("SET session_replication_role = replica")

    # Update each conflicting object. Repo.stream/1 must run inside a
    # transaction; Ecto migrations are transactional by default, so that
    # requirement is satisfied here.
    activity_conflict_query()
    |> Repo.stream()
    |> Stream.each(&update_object!/1)
    |> Stream.run()

    # Re-enable triggers
    Repo.query!("SET session_replication_role = DEFAULT")
  end

  # Get only objects with a conflicting activity ID (an object whose primary
  # key collides with a row in the activities table).
  defp activity_conflict_query() do
    join(Object, :inner, [o], a in "activities", on: a.id == o.id)
  end

  # Update the object and its relations with a newly-generated ID.
  # The {:ok, %Object{}} match asserts success -- any other return from
  # ObjectId.change_id/2 raises a MatchError and aborts the migration.
  defp update_object!(object) do
    # New flake ID is derived from the original insertion time -- presumably
    # to keep ID order roughly chronological (TODO confirm with ObjectId).
    new_id = ObjectId.flake_from_time(object.inserted_at)
    {:ok, %Object{}} = ObjectId.change_id(object, new_id)
  end

  # Irreversible: the old conflicting IDs are not recorded anywhere.
  def down do
    :ok
  end
end
|
|
@ -0,0 +1,227 @@
|
|||
defmodule Pleroma.Repo.Migrations.CombineActivitiesAndObjects do
  @moduledoc """
  Merge the `activities` table into `objects`: add the activity-only columns
  to `objects`, copy every activity row across, repoint foreign keys
  (notifications, bookmarks, report_notes), drop `activities`, and rebuild
  the visibility trigger and `thread_visibility` function to act on `objects`.

  Irreversible -- `down/0` raises.
  """
  use Ecto.Migration
  # Required for the Integer.is_even/1 guard macro used in the heartbeat fn.
  require Integer

  alias Pleroma.Clippy
  alias Pleroma.Repo

  import Ecto.Query

  # Names of the pre-existing counter-cache function and its trigger, which
  # is re-created below against the objects table.
  @function_name "update_status_visibility_counter_cache"
  @trigger_name "status_visibility_counter_cache_trigger"

  def up do
    clippy = start_clippy_heartbeats()

    # Lock both tables to avoid a running server meddling with our transaction
    execute("LOCK TABLE activities")
    execute("LOCK TABLE objects")

    # Add missing fields to objects table (columns that previously only
    # existed on activities).
    alter table(:objects) do
      add(:local, :boolean, null: false, default: true)
      add(:actor, :string)
      add(:recipients, {:array, :string}, default: [])
    end

    # Add missing indexes to objects
    create_if_not_exists(index(:objects, [:local]))
    create_if_not_exists(index(:objects, [:actor, "id DESC NULLS LAST"]))
    create_if_not_exists(index(:objects, [:recipients], using: :gin))

    # Intentionally omit these. According to LiveDashboard they're not used:
    #
    # create_if_not_exists(
    #   index(:objects, ["(data->'to')"], name: :objects_to_index, using: :gin)
    # )
    #
    # create_if_not_exists(
    #   index(:objects, ["(data->'cc')"], name: :objects_cc_index, using: :gin)
    # )

    create_if_not_exists(
      index(:objects, ["(data->>'actor')", "inserted_at desc"], name: :objects_actor_index)
    )

    # Some obscure Fediverse backends (WordPress, Juick) send a Create and a Note
    # with the exact same ActivityPub ID. This violates the spec and doesn't
    # work in the new system. WordPress devs were notified.
    execute(
      "DELETE FROM activities USING objects WHERE activities.data->>'id' = objects.data->>'id'"
    )

    # Copy all activities into the newly formatted objects table.
    # ON CONFLICT DO NOTHING: rows whose IDs already exist in objects are
    # skipped (conflicts were handled by the ResolveActivityObjectConflicts
    # migration -- TODO confirm ordering of these migrations).
    execute(
      "INSERT INTO objects (id, data, local, actor, recipients, inserted_at, updated_at) SELECT id, data, local, actor, recipients, inserted_at, updated_at FROM activities ON CONFLICT DO NOTHING"
    )

    # Update notifications foreign key: the activity_id column now points at
    # objects instead of the soon-to-be-dropped activities table.
    execute("alter table notifications drop constraint notifications_activity_id_fkey")

    execute(
      "alter table notifications add constraint notifications_object_id_fkey foreign key (activity_id) references objects(id) on delete cascade"
    )

    # Update bookmarks foreign key (same repointing as notifications).
    execute("alter table bookmarks drop constraint bookmarks_activity_id_fkey")

    execute(
      "alter table bookmarks add constraint bookmarks_object_id_fkey foreign key (activity_id) references objects(id) on delete cascade"
    )

    # Update report notes foreign key. NOTE(review): unlike the two above,
    # this constraint has no ON DELETE CASCADE -- confirm that is intended.
    execute("alter table report_notes drop constraint report_notes_activity_id_fkey")

    execute(
      "alter table report_notes add constraint report_notes_object_id_fkey foreign key (activity_id) references objects(id)"
    )

    # Nuke the old activities table
    execute("drop table activities")

    # Update triggers: re-create the counter-cache trigger against objects.
    # (The old trigger on activities went away with the table drop.)
    """
    CREATE TRIGGER #{@trigger_name}
    BEFORE
      INSERT
      OR UPDATE of recipients, data
      OR DELETE
    ON objects
    FOR EACH ROW
      EXECUTE PROCEDURE #{@function_name}();
    """
    |> execute()

    # Replace thread_visibility with a version whose second argument is an
    # object ID; the old activity-based signature must be dropped first
    # because the parameter name/type change prevents CREATE OR REPLACE.
    execute("drop function if exists thread_visibility(actor varchar, activity_id varchar)")
    execute(update_thread_visibility())

    # Run all queued migration commands now, so the heartbeat is stopped only
    # after the work has actually been executed.
    flush()

    stop_clippy_heartbeats(clippy)
  end

  def down do
    raise "Lol, there's no going back from this."
  end

  # It acts upon objects instead of activities now
  def update_thread_visibility do
    """
    CREATE OR REPLACE FUNCTION thread_visibility(actor varchar, object_id varchar) RETURNS boolean AS $$
    DECLARE
      public varchar := 'https://www.w3.org/ns/activitystreams#Public';
      child objects%ROWTYPE;
      object objects%ROWTYPE;
      author_fa varchar;
      valid_recipients varchar[];
      actor_user_following varchar[];
    BEGIN
      --- Fetch actor following
      SELECT array_agg(following.follower_address) INTO actor_user_following FROM following_relationships
      JOIN users ON users.id = following_relationships.follower_id
      JOIN users AS following ON following.id = following_relationships.following_id
      WHERE users.ap_id = actor;

      --- Fetch our initial object.
      SELECT * INTO object FROM objects WHERE objects.data->>'id' = object_id;

      LOOP
        --- Ensure that we have an object before continuing.
        --- If we don't, the thread is not satisfiable.
        IF object IS NULL THEN
          RETURN false;
        END IF;

        --- We only care about Create objects.
        IF object.data->>'type' != 'Create' THEN
          RETURN true;
        END IF;

        --- Normalize the child object into child.
        SELECT * INTO child FROM objects
        WHERE COALESCE(object.data->'object'->>'id', object.data->>'object') = objects.data->>'id';

        --- Fetch the author's AS2 following collection.
        SELECT COALESCE(users.follower_address, '') INTO author_fa FROM users WHERE users.ap_id = object.actor;

        --- Prepare valid recipients array.
        valid_recipients := ARRAY[actor, public];
        IF ARRAY[author_fa] && actor_user_following THEN
          valid_recipients := valid_recipients || author_fa;
        END IF;

        --- Check visibility.
        IF NOT valid_recipients && object.recipients THEN
          --- object not visible, break out of the loop
          RETURN false;
        END IF;

        --- If there's a parent, load it and do this all over again.
        IF (child.data->'inReplyTo' IS NOT NULL) AND (child.data->'inReplyTo' != 'null'::jsonb) THEN
          SELECT * INTO object FROM objects
          WHERE child.data->>'inReplyTo' = objects.data->>'id';
        ELSE
          RETURN true;
        END IF;
      END LOOP;
    END;
    $$ LANGUAGE plpgsql IMMUTABLE;
    """
  end

  # Starts a background heartbeat process that prints Clippy tips / progress
  # lines while the migration runs. Returns the heartbeat pid, or nil when
  # the database is small (<= 5000 activities) and no heartbeat is needed.
  defp start_clippy_heartbeats() do
    count = from(a in "activities", select: count(a.id)) |> Repo.one!()

    if count > 5000 do
      heartbeat_interval = :timer.minutes(2) + :timer.seconds(30)

      all_tips =
        Clippy.tips() ++
          [
            "The migration is still running, maybe it's time for another “tea”?",
            "Happy rabbits practice a cute behavior known as a\n“binky:” they jump up in the air\nand twist\nand spin around!",
            "Nothing and everything.\n\nI still work.",
            "Pleroma runs on a Raspberry Pi!\n\n … but this migration will take forever if you\nactually run on a raspberry pi",
            "Status? Stati? Post? Note? Toot?\nRepeat? Reboost? Boost? Retweet? Retoot??\n\nI-I'm confused."
          ]

      # Self-recursive printer: even runs show a shuffled tip, odd runs show
      # a timestamped "still running" line, then sleep and recurse.
      heartbeat = fn heartbeat, runs, all_tips, tips ->
        tips =
          if Integer.is_even(runs) do
            # Refill the tip pool once exhausted so tips cycle indefinitely.
            tips = if tips == [], do: all_tips, else: tips
            [tip | tips] = Enum.shuffle(tips)
            Clippy.puts(tip)
            tips
          else
            IO.puts(
              "\n -- #{DateTime.to_string(DateTime.utc_now())} Migration still running, please wait…\n"
            )

            tips
          end

        :timer.sleep(heartbeat_interval)
        heartbeat.(heartbeat, runs + 1, all_tips, tips)
      end

      Clippy.puts([
        [:red, :bright, "It looks like you are running an older instance!"],
        [""],
        [:bright, "This migration may take a long time", :reset, " -- so you probably should"],
        ["go drink a cofe, or a tea, or a beer, a whiskey, a vodka,"],
        ["while it runs to deal with your temporary fediverse pause!"]
      ])

      # NOTE(review): this sleep runs in the *caller*, so the migration itself
      # is delayed by a full heartbeat_interval before any DDL executes.
      # Confirm this is intended; moving the sleep inside the spawned fn
      # would keep the same message cadence without delaying the migration.
      :timer.sleep(heartbeat_interval)
      spawn_link(fn -> heartbeat.(heartbeat, 1, all_tips, []) end)
    end
  end

  # Stops the heartbeat started above; accepts nil (no heartbeat) as a no-op.
  defp stop_clippy_heartbeats(pid) do
    if pid do
      # Unlink first so the :kill doesn't propagate to the migration process.
      Process.unlink(pid)
      Process.exit(pid, :kill)
      Clippy.puts([[:green, :bright, "Hurray!!", "", "", "Migration completed!"]])
    end
  end
end
|
|
@ -0,0 +1,40 @@
|
|||
defmodule Pleroma.Repo.Migrations.AddObjectConcurrentIndexes do
  @moduledoc """
  Add several large indexes to the `objects` table, built with
  `CONCURRENTLY` so the table stays available for writes during creation.
  """
  use Ecto.Migration

  # PostgreSQL's CREATE INDEX CONCURRENTLY cannot run inside a transaction,
  # so both the Ecto migration lock and the DDL transaction are disabled.
  @disable_migration_lock true
  @disable_ddl_transaction true

  def change do
    # Per-actor timeline index, newest IDs first.
    create_if_not_exists(index(:objects, [:actor, "id DESC NULLS LAST"], concurrently: true))

    # Functional index on the JSON type + context fields.
    create_if_not_exists(
      index(:objects, ["(data->>'type')", "(data->>'context')"],
        name: :objects_context_index,
        concurrently: true
      )
    )

    # Index on the host portion of the actor URL
    # (split_part(actor, '/', 3) extracts the domain from "https://host/...").
    create_if_not_exists(
      index(:objects, ["(split_part(actor, '/', 3))"],
        concurrently: true,
        name: :objects_hosts
      )
    )

    # Composite index over descending ID and the local flag.
    create_if_not_exists(index(:objects, ["id desc nulls last", "local"], concurrently: true))

    # Partial functional index used for visibility-filtered timelines;
    # restricted to Create objects only.
    create_if_not_exists(
      index(:objects, ["activity_visibility(actor, recipients, data)", "id DESC NULLS LAST"],
        name: :objects_visibility_index,
        concurrently: true,
        where: "data->>'type' = 'Create'"
      )
    )

    # Functional index on the embedded object reference (object id whether
    # given as a nested map or a bare string).
    create_if_not_exists(
      index(:objects, ["(coalesce(data->'object'->>'id', data->>'object'))"],
        name: :objects_create_objects_index,
        concurrently: true
      )
    )
  end
end
|
|
@ -6,7 +6,6 @@ defmodule Mix.Tasks.Pleroma.DatabaseTest do
|
|||
use Pleroma.DataCase, async: true
|
||||
use Oban.Testing, repo: Pleroma.Repo
|
||||
|
||||
alias Pleroma.Activity
|
||||
alias Pleroma.Object
|
||||
alias Pleroma.Repo
|
||||
alias Pleroma.User
|
||||
|
@ -24,26 +23,6 @@ defmodule Mix.Tasks.Pleroma.DatabaseTest do
|
|||
:ok
|
||||
end
|
||||
|
||||
describe "running remove_embedded_objects" do
|
||||
test "it replaces objects with references" do
|
||||
user = insert(:user)
|
||||
{:ok, activity} = CommonAPI.post(user, %{status: "test"})
|
||||
new_data = Map.put(activity.data, "object", activity.object.data)
|
||||
|
||||
{:ok, activity} =
|
||||
activity
|
||||
|> Activity.change(%{data: new_data})
|
||||
|> Repo.update()
|
||||
|
||||
assert is_map(activity.data["object"])
|
||||
|
||||
Mix.Tasks.Pleroma.Database.run(["remove_embedded_objects"])
|
||||
|
||||
activity = Activity.get_by_id_with_object(activity.id)
|
||||
assert is_binary(activity.data["object"])
|
||||
end
|
||||
end
|
||||
|
||||
describe "prune_objects" do
|
||||
test "it prunes old objects from the database" do
|
||||
insert(:note)
|
||||
|
|
|
@ -51,7 +51,8 @@ test "preloading a bookmark" do
|
|||
{:ok, bookmark3} = Bookmark.create(user3.id, activity.id)
|
||||
|
||||
queried_activity =
|
||||
Ecto.Query.from(Pleroma.Activity)
|
||||
Activity
|
||||
|> Ecto.Query.where(id: ^activity.id)
|
||||
|> Activity.with_preloaded_bookmark(user3)
|
||||
|> Repo.one()
|
||||
|
||||
|
@ -64,17 +65,19 @@ test "setting thread_muted?" do
|
|||
annoyed_user = insert(:user)
|
||||
{:ok, _} = ThreadMute.add_mute(annoyed_user.id, activity.data["context"])
|
||||
|
||||
query = Ecto.Query.where(Activity, id: ^activity.id)
|
||||
|
||||
activity_with_unset_thread_muted_field =
|
||||
Ecto.Query.from(Activity)
|
||||
query
|
||||
|> Repo.one()
|
||||
|
||||
activity_for_user =
|
||||
Ecto.Query.from(Activity)
|
||||
query
|
||||
|> Activity.with_set_thread_muted_field(user)
|
||||
|> Repo.one()
|
||||
|
||||
activity_for_annoyed_user =
|
||||
Ecto.Query.from(Activity)
|
||||
query
|
||||
|> Activity.with_set_thread_muted_field(annoyed_user)
|
||||
|> Repo.one()
|
||||
|
||||
|
@ -90,7 +93,7 @@ test "when association is loaded" do
|
|||
{:ok, bookmark} = Bookmark.create(user.id, activity.id)
|
||||
|
||||
queried_activity =
|
||||
Ecto.Query.from(Pleroma.Activity)
|
||||
Ecto.Query.where(Activity, id: ^activity.id)
|
||||
|> Activity.with_preloaded_bookmark(user)
|
||||
|> Repo.one()
|
||||
|
||||
|
@ -103,7 +106,7 @@ test "when association is not loaded" do
|
|||
{:ok, bookmark} = Bookmark.create(user.id, activity.id)
|
||||
|
||||
queried_activity =
|
||||
Ecto.Query.from(Pleroma.Activity)
|
||||
Ecto.Query.where(Activity, id: ^activity.id)
|
||||
|> Repo.one()
|
||||
|
||||
assert Activity.get_bookmark(queried_activity, user) == bookmark
|
||||
|
@ -266,7 +269,11 @@ test "add_by_params_query/3" do
|
|||
insert(:add_activity, user: user, note: note)
|
||||
insert(:add_activity, user: user)
|
||||
|
||||
assert Repo.aggregate(Activity, :count, :id) == 4
|
||||
activities_query =
|
||||
Activity
|
||||
|> Ecto.Query.where(fragment("data->>'type' IN ('Create', 'Add')"))
|
||||
|
||||
assert Repo.aggregate(activities_query, :count, :id) == 4
|
||||
|
||||
add_query =
|
||||
Activity.add_by_params_query(note.data["object"], user.ap_id, user.featured_address)
|
||||
|
@ -276,6 +283,6 @@ test "add_by_params_query/3" do
|
|||
Repo.delete_all(add_query)
|
||||
assert Repo.aggregate(add_query, :count, :id) == 0
|
||||
|
||||
assert Repo.aggregate(Activity, :count, :id) == 2
|
||||
assert Repo.aggregate(activities_query, :count, :id) == 2
|
||||
end
|
||||
end
|
||||
|
|
|
@ -0,0 +1,15 @@
|
|||
# Pleroma: A lightweight social networking server
|
||||
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
|
||||
# SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
defmodule Pleroma.MigrationHelper.ObjectIdTest do
|
||||
use Pleroma.DataCase, async: true
|
||||
alias Pleroma.MigrationHelper.ObjectId
|
||||
|
||||
test "flake_from_time/1" do
|
||||
now = NaiveDateTime.utc_now()
|
||||
id = ObjectId.flake_from_time(now)
|
||||
|
||||
assert FlakeId.flake_id?(id)
|
||||
end
|
||||
end
|
|
@ -18,7 +18,7 @@ defmodule Pleroma.PaginationTest do
|
|||
end
|
||||
|
||||
test "paginates by min_id", %{notes: notes} do
|
||||
id = Enum.at(notes, 2).id |> Integer.to_string()
|
||||
id = Enum.at(notes, 2).id
|
||||
|
||||
%{total: total, items: paginated} =
|
||||
Pagination.fetch_paginated(Object, %{min_id: id, total: true})
|
||||
|
@ -28,7 +28,7 @@ test "paginates by min_id", %{notes: notes} do
|
|||
end
|
||||
|
||||
test "paginates by since_id", %{notes: notes} do
|
||||
id = Enum.at(notes, 2).id |> Integer.to_string()
|
||||
id = Enum.at(notes, 2).id
|
||||
|
||||
%{total: total, items: paginated} =
|
||||
Pagination.fetch_paginated(Object, %{since_id: id, total: true})
|
||||
|
@ -38,7 +38,7 @@ test "paginates by since_id", %{notes: notes} do
|
|||
end
|
||||
|
||||
test "paginates by max_id", %{notes: notes} do
|
||||
id = Enum.at(notes, 1).id |> Integer.to_string()
|
||||
id = Enum.at(notes, 1).id
|
||||
|
||||
%{total: total, items: paginated} =
|
||||
Pagination.fetch_paginated(Object, %{max_id: id, total: true})
|
||||
|
@ -48,7 +48,7 @@ test "paginates by max_id", %{notes: notes} do
|
|||
end
|
||||
|
||||
test "paginates by min_id & limit", %{notes: notes} do
|
||||
id = Enum.at(notes, 2).id |> Integer.to_string()
|
||||
id = Enum.at(notes, 2).id
|
||||
|
||||
paginated = Pagination.fetch_paginated(Object, %{min_id: id, limit: 1})
|
||||
|
||||
|
@ -56,7 +56,7 @@ test "paginates by min_id & limit", %{notes: notes} do
|
|||
end
|
||||
|
||||
test "handles id gracefully", %{notes: notes} do
|
||||
id = Enum.at(notes, 1).id |> Integer.to_string()
|
||||
id = Enum.at(notes, 1).id
|
||||
|
||||
paginated =
|
||||
Pagination.fetch_paginated(Object, %{
|
||||
|
|
|
@ -19,6 +19,8 @@ defmodule Pleroma.UserTest do
|
|||
import ExUnit.CaptureLog
|
||||
import Swoosh.TestAssertions
|
||||
|
||||
require Ecto.Query
|
||||
|
||||
setup_all do
|
||||
Tesla.Mock.mock_global(fn env -> apply(HttpRequestMock, :request, [env]) end)
|
||||
:ok
|
||||
|
@ -436,7 +438,11 @@ test "it sends a welcome message if it is set" do
|
|||
{:ok, registered_user} = User.register(cng)
|
||||
ObanHelpers.perform_all()
|
||||
|
||||
activity = Repo.one(Pleroma.Activity)
|
||||
activity =
|
||||
Activity
|
||||
|> Ecto.Query.where(fragment("data->>'type' = 'Create'"))
|
||||
|> Repo.one()
|
||||
|
||||
assert registered_user.ap_id in activity.recipients
|
||||
assert Object.normalize(activity, fetch: false).data["content"] =~ "direct message"
|
||||
assert activity.actor == welcome_user.ap_id
|
||||
|
@ -452,7 +458,11 @@ test "it sends a welcome chat message if it is set" do
|
|||
{:ok, registered_user} = User.register(cng)
|
||||
ObanHelpers.perform_all()
|
||||
|
||||
activity = Repo.one(Pleroma.Activity)
|
||||
activity =
|
||||
Activity
|
||||
|> Ecto.Query.where(fragment("data->>'type' = 'Create'"))
|
||||
|> Repo.one()
|
||||
|
||||
assert registered_user.ap_id in activity.recipients
|
||||
assert Object.normalize(activity, fetch: false).data["content"] =~ "chat message"
|
||||
assert activity.actor == welcome_user.ap_id
|
||||
|
@ -491,7 +501,11 @@ test "it sends a welcome chat message when Simple policy applied to local instan
|
|||
{:ok, registered_user} = User.register(cng)
|
||||
ObanHelpers.perform_all()
|
||||
|
||||
activity = Repo.one(Pleroma.Activity)
|
||||
activity =
|
||||
Activity
|
||||
|> Ecto.Query.where(fragment("data->>'type' = 'Create'"))
|
||||
|> Repo.one()
|
||||
|
||||
assert registered_user.ap_id in activity.recipients
|
||||
assert Object.normalize(activity, fetch: false).data["content"] =~ "chat message"
|
||||
assert activity.actor == welcome_user.ap_id
|
||||
|
|
|
@ -1143,7 +1143,7 @@ test "forwarded report", %{conn: conn} do
|
|||
|
||||
ObanHelpers.perform(all_enqueued(worker: ReceiverWorker))
|
||||
|
||||
assert Pleroma.Repo.aggregate(Activity, :count, :id) == 2
|
||||
assert Pleroma.Repo.aggregate(Object, :count, :id) == 4
|
||||
|
||||
ObanHelpers.perform_all()
|
||||
|
||||
|
|
|
@ -1491,8 +1491,8 @@ test "it filters broken threads" do
|
|||
ActivityPub.fetch_activities([user1.ap_id | User.following(user1)], %{user: user1})
|
||||
|> Enum.map(fn a -> a.id end)
|
||||
|
||||
assert [public_activity.id, private_activity_1.id] == activities
|
||||
assert length(activities) == 2
|
||||
assert [public_activity.id, private_activity_1.id, private_activity_3.id] == activities
|
||||
assert length(activities) == 3
|
||||
end
|
||||
end
|
||||
|
||||
|
@ -1611,8 +1611,7 @@ test "it can create a Flag activity",
|
|||
content: content
|
||||
})
|
||||
|
||||
assert Repo.aggregate(Activity, :count, :id) == 1
|
||||
assert Repo.aggregate(Object, :count, :id) == 2
|
||||
assert Repo.aggregate(Object, :count, :id) == 3
|
||||
assert Repo.aggregate(Notification, :count, :id) == 0
|
||||
end
|
||||
end
|
||||
|
|
|
@ -364,7 +364,7 @@ test "it returns reports with notes", %{conn: conn, admin: admin} do
|
|||
[note, _] = notes
|
||||
|
||||
assert note["user"]["nickname"] == admin.nickname
|
||||
assert note["content"] == "this is disgusting!"
|
||||
assert note["content"] =~ "this is disgusting"
|
||||
assert note["created_at"]
|
||||
assert response["total"] == 1
|
||||
end
|
||||
|
|
Loading…
Reference in New Issue