vf_scale.c

/*
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * scale video filter
 */

#include <float.h>
#include <stdio.h>
#include <string.h>

#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "framesync.h"
#include "libavutil/pixfmt.h"
#include "scale_eval.h"
#include "video.h"
#include "libavutil/eval.h"
#include "libavutil/imgutils_internal.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libswscale/swscale.h"

static const char *const var_names[] = {
    "in_w",  "iw",
    "in_h",  "ih",
    "out_w", "ow",
    "out_h", "oh",
    "a",
    "sar",
    "dar",
    "hsub",
    "vsub",
    "ohsub",
    "ovsub",
    "n",
    "t",
    "ref_w", "rw",
    "ref_h", "rh",
    "ref_a",
    "ref_sar",
    "ref_dar", "rdar",
    "ref_hsub",
    "ref_vsub",
    "ref_n",
    "ref_t",
    "ref_pos",
    /* Legacy variables for scale2ref */
    "main_w",
    "main_h",
    "main_a",
    "main_sar",
    "main_dar", "mdar",
    "main_hsub",
    "main_vsub",
    "main_n",
    "main_t",
    "main_pos",
    NULL
};

enum var_name {
    VAR_IN_W,  VAR_IW,
    VAR_IN_H,  VAR_IH,
    VAR_OUT_W, VAR_OW,
    VAR_OUT_H, VAR_OH,
    VAR_A,
    VAR_SAR,
    VAR_DAR,
    VAR_HSUB,
    VAR_VSUB,
    VAR_OHSUB,
    VAR_OVSUB,
    VAR_N,
    VAR_T,
    VAR_REF_W, VAR_RW,
    VAR_REF_H, VAR_RH,
    VAR_REF_A,
    VAR_REF_SAR,
    VAR_REF_DAR, VAR_RDAR,
    VAR_REF_HSUB,
    VAR_REF_VSUB,
    VAR_REF_N,
    VAR_REF_T,
    VAR_REF_POS,
    VAR_S2R_MAIN_W,
    VAR_S2R_MAIN_H,
    VAR_S2R_MAIN_A,
    VAR_S2R_MAIN_SAR,
    VAR_S2R_MAIN_DAR, VAR_S2R_MDAR,
    VAR_S2R_MAIN_HSUB,
    VAR_S2R_MAIN_VSUB,
    VAR_S2R_MAIN_N,
    VAR_S2R_MAIN_T,
    VAR_S2R_MAIN_POS,
    VARS_NB
};

enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};

typedef struct ScaleContext {
    const AVClass *class;
    SwsContext *sws;
    FFFrameSync fs;

    /**
     * New dimensions. Special values are:
     *   0 = original width/height
     *  -1 = keep original aspect
     *  -N = try to keep aspect but make sure it is divisible by N
     */
    int w, h;
    char *size_str;
    double param[2];            // sws params

    int hsub, vsub;             ///< chroma subsampling
    int slice_y;                ///< top of current output slice
    int interlaced;
    int uses_ref;

    char *w_expr;               ///< width expression string
    char *h_expr;               ///< height expression string
    AVExpr *w_pexpr;
    AVExpr *h_pexpr;
    double var_values[VARS_NB];

    char *flags_str;

    int in_color_matrix;
    int out_color_matrix;
    int in_primaries;
    int out_primaries;
    int in_transfer;
    int out_transfer;
    int in_range;
    int out_range;

    int in_chroma_loc;
    int out_chroma_loc;
    int out_h_chr_pos;
    int out_v_chr_pos;
    int in_h_chr_pos;
    int in_v_chr_pos;

    int force_original_aspect_ratio;
    int force_divisible_by;
    int reset_sar;

    int eval_mode;              ///< expression evaluation mode
} ScaleContext;

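/*
 * Illustrative examples (editorial addition, not part of the original source):
 * how the special w/h values documented above combine in a filtergraph string:
 *
 *   scale=w=1280:h=720    explicit output size
 *   scale=w=0:h=720       keep the input width, set height to 720
 *   scale=w=-1:h=720      derive the width so the input aspect ratio is kept
 *   scale=w=-2:h=720      same, but round the derived width to a multiple of 2
 */
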
const FFFilter ff_vf_scale2ref;

#define IS_SCALE2REF(ctx) ((ctx)->filter == &ff_vf_scale2ref.p)

static int config_props(AVFilterLink *outlink);

static int check_exprs(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;
    unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };

    if (!scale->w_pexpr && !scale->h_pexpr)
        return AVERROR(EINVAL);

    if (scale->w_pexpr)
        av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
    if (scale->h_pexpr)
        av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);

    if (vars_w[VAR_OUT_W] || vars_w[VAR_OW]) {
        av_log(ctx, AV_LOG_ERROR, "Width expression cannot be self-referencing: '%s'.\n", scale->w_expr);
        return AVERROR(EINVAL);
    }

    if (vars_h[VAR_OUT_H] || vars_h[VAR_OH]) {
        av_log(ctx, AV_LOG_ERROR, "Height expression cannot be self-referencing: '%s'.\n", scale->h_expr);
        return AVERROR(EINVAL);
    }

    if ((vars_w[VAR_OUT_H] || vars_w[VAR_OH]) &&
        (vars_h[VAR_OUT_W] || vars_h[VAR_OW])) {
        av_log(ctx, AV_LOG_WARNING, "Circular references detected for width '%s' and height '%s' - possibly invalid.\n", scale->w_expr, scale->h_expr);
    }

    if (vars_w[VAR_REF_W]    || vars_h[VAR_REF_W]    ||
        vars_w[VAR_RW]       || vars_h[VAR_RW]       ||
        vars_w[VAR_REF_H]    || vars_h[VAR_REF_H]    ||
        vars_w[VAR_RH]       || vars_h[VAR_RH]       ||
        vars_w[VAR_REF_A]    || vars_h[VAR_REF_A]    ||
        vars_w[VAR_REF_SAR]  || vars_h[VAR_REF_SAR]  ||
        vars_w[VAR_REF_DAR]  || vars_h[VAR_REF_DAR]  ||
        vars_w[VAR_RDAR]     || vars_h[VAR_RDAR]     ||
        vars_w[VAR_REF_HSUB] || vars_h[VAR_REF_HSUB] ||
        vars_w[VAR_REF_VSUB] || vars_h[VAR_REF_VSUB] ||
        vars_w[VAR_REF_N]    || vars_h[VAR_REF_N]    ||
        vars_w[VAR_REF_T]    || vars_h[VAR_REF_T]    ||
        vars_w[VAR_REF_POS]  || vars_h[VAR_REF_POS]) {
        scale->uses_ref = 1;
    }

    if (!IS_SCALE2REF(ctx) &&
        (vars_w[VAR_S2R_MAIN_W]    || vars_h[VAR_S2R_MAIN_W]    ||
         vars_w[VAR_S2R_MAIN_H]    || vars_h[VAR_S2R_MAIN_H]    ||
         vars_w[VAR_S2R_MAIN_A]    || vars_h[VAR_S2R_MAIN_A]    ||
         vars_w[VAR_S2R_MAIN_SAR]  || vars_h[VAR_S2R_MAIN_SAR]  ||
         vars_w[VAR_S2R_MAIN_DAR]  || vars_h[VAR_S2R_MAIN_DAR]  ||
         vars_w[VAR_S2R_MDAR]      || vars_h[VAR_S2R_MDAR]      ||
         vars_w[VAR_S2R_MAIN_HSUB] || vars_h[VAR_S2R_MAIN_HSUB] ||
         vars_w[VAR_S2R_MAIN_VSUB] || vars_h[VAR_S2R_MAIN_VSUB] ||
         vars_w[VAR_S2R_MAIN_N]    || vars_h[VAR_S2R_MAIN_N]    ||
         vars_w[VAR_S2R_MAIN_T]    || vars_h[VAR_S2R_MAIN_T]    ||
         vars_w[VAR_S2R_MAIN_POS]  || vars_h[VAR_S2R_MAIN_POS])) {
        av_log(ctx, AV_LOG_ERROR, "Expressions with scale2ref variables are not valid in scale filter.\n");
        return AVERROR(EINVAL);
    }

    if (scale->eval_mode == EVAL_MODE_INIT &&
        (vars_w[VAR_N]            || vars_h[VAR_N]            ||
         vars_w[VAR_T]            || vars_h[VAR_T]            ||
         vars_w[VAR_S2R_MAIN_N]   || vars_h[VAR_S2R_MAIN_N]   ||
         vars_w[VAR_S2R_MAIN_T]   || vars_h[VAR_S2R_MAIN_T]   ||
         vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS])) {
        av_log(ctx, AV_LOG_ERROR, "Expressions with frame variables 'n', 't', 'pos' are not valid in init eval_mode.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

static int scale_parse_expr(AVFilterContext *ctx, char *str_expr, AVExpr **pexpr_ptr, const char *var, const char *args)
{
    ScaleContext *scale = ctx->priv;
    int ret, is_inited = 0;
    char *old_str_expr = NULL;
    AVExpr *old_pexpr = NULL;

    if (str_expr) {
        old_str_expr = av_strdup(str_expr);
        if (!old_str_expr)
            return AVERROR(ENOMEM);
        av_opt_set(scale, var, args, 0);
    }

    if (*pexpr_ptr) {
        old_pexpr = *pexpr_ptr;
        *pexpr_ptr = NULL;
        is_inited = 1;
    }

    ret = av_expr_parse(pexpr_ptr, args, var_names,
                        NULL, NULL, NULL, NULL, 0, ctx);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Cannot parse expression for %s: '%s'\n", var, args);
        goto revert;
    }

    ret = check_exprs(ctx);
    if (ret < 0)
        goto revert;

    if (is_inited && (ret = config_props(ctx->outputs[0])) < 0)
        goto revert;

    av_expr_free(old_pexpr);
    old_pexpr = NULL;
    av_freep(&old_str_expr);

    return 0;

revert:
    av_expr_free(*pexpr_ptr);
    *pexpr_ptr = NULL;
    if (old_str_expr) {
        av_opt_set(scale, var, old_str_expr, 0);
        av_free(old_str_expr);
    }
    if (old_pexpr)
        *pexpr_ptr = old_pexpr;

    return ret;
}

static av_cold int preinit(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;

    scale->sws = sws_alloc_context();
    if (!scale->sws)
        return AVERROR(ENOMEM);

    // set threads=0, so we can later check whether the user modified it
    scale->sws->threads = 0;

    ff_framesync_preinit(&scale->fs);

    return 0;
}

static int do_scale(FFFrameSync *fs);

static av_cold int init(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;
    int ret;

    if (IS_SCALE2REF(ctx))
        av_log(ctx, AV_LOG_WARNING, "scale2ref is deprecated, use scale=rw:rh instead\n");

    if (scale->size_str && (scale->w_expr || scale->h_expr)) {
        av_log(ctx, AV_LOG_ERROR,
               "Size and width/height expressions cannot be set at the same time.\n");
        return AVERROR(EINVAL);
    }

    if (scale->w_expr && !scale->h_expr)
        FFSWAP(char *, scale->w_expr, scale->size_str);

    if (scale->size_str) {
        char buf[32];
        if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Invalid size '%s'\n", scale->size_str);
            return ret;
        }
        snprintf(buf, sizeof(buf)-1, "%d", scale->w);
        av_opt_set(scale, "w", buf, 0);
        snprintf(buf, sizeof(buf)-1, "%d", scale->h);
        av_opt_set(scale, "h", buf, 0);
    }
    if (!scale->w_expr)
        av_opt_set(scale, "w", "iw", 0);
    if (!scale->h_expr)
        av_opt_set(scale, "h", "ih", 0);

    ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
    if (ret < 0)
        return ret;

    ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
    if (ret < 0)
        return ret;

    if (scale->in_primaries != -1 && !sws_test_primaries(scale->in_primaries, 0)) {
        av_log(ctx, AV_LOG_ERROR, "Unsupported input primaries '%s'\n",
               av_color_primaries_name(scale->in_primaries));
        return AVERROR(EINVAL);
    }

    if (scale->out_primaries != -1 && !sws_test_primaries(scale->out_primaries, 1)) {
        av_log(ctx, AV_LOG_ERROR, "Unsupported output primaries '%s'\n",
               av_color_primaries_name(scale->out_primaries));
        return AVERROR(EINVAL);
    }

    if (scale->in_transfer != -1 && !sws_test_transfer(scale->in_transfer, 0)) {
        av_log(ctx, AV_LOG_ERROR, "Unsupported input transfer '%s'\n",
               av_color_transfer_name(scale->in_transfer));
        return AVERROR(EINVAL);
    }

    if (scale->out_transfer != -1 && !sws_test_transfer(scale->out_transfer, 1)) {
        av_log(ctx, AV_LOG_ERROR, "Unsupported output transfer '%s'\n",
               av_color_transfer_name(scale->out_transfer));
        return AVERROR(EINVAL);
    }

    if (scale->in_color_matrix != -1 && !sws_test_colorspace(scale->in_color_matrix, 0)) {
        av_log(ctx, AV_LOG_ERROR, "Unsupported input color matrix '%s'\n",
               av_color_space_name(scale->in_color_matrix));
        return AVERROR(EINVAL);
    }

    if (scale->out_color_matrix != -1 && !sws_test_colorspace(scale->out_color_matrix, 1)) {
        av_log(ctx, AV_LOG_ERROR, "Unsupported output color matrix '%s'\n",
               av_color_space_name(scale->out_color_matrix));
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
           scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);

    if (scale->flags_str && *scale->flags_str) {
        ret = av_opt_set(scale->sws, "sws_flags", scale->flags_str, 0);
        if (ret < 0)
            return ret;
    }

    for (int i = 0; i < FF_ARRAY_ELEMS(scale->param); i++)
        if (scale->param[i] != DBL_MAX)
            scale->sws->scaler_params[i] = scale->param[i];

    scale->sws->src_h_chr_pos = scale->in_h_chr_pos;
    scale->sws->src_v_chr_pos = scale->in_v_chr_pos;
    scale->sws->dst_h_chr_pos = scale->out_h_chr_pos;
    scale->sws->dst_v_chr_pos = scale->out_v_chr_pos;

    // use generic thread-count if the user did not set it explicitly
    if (!scale->sws->threads)
        scale->sws->threads = ff_filter_get_nb_threads(ctx);

    if (!IS_SCALE2REF(ctx) && scale->uses_ref) {
        AVFilterPad pad = {
            .name = "ref",
            .type = AVMEDIA_TYPE_VIDEO,
        };
        ret = ff_append_inpad(ctx, &pad);
        if (ret < 0)
            return ret;
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;

    av_expr_free(scale->w_pexpr);
    av_expr_free(scale->h_pexpr);
    scale->w_pexpr = scale->h_pexpr = NULL;
    ff_framesync_uninit(&scale->fs);
    sws_free_context(&scale->sws);
}

static int query_formats(const AVFilterContext *ctx,
                         AVFilterFormatsConfig **cfg_in,
                         AVFilterFormatsConfig **cfg_out)
{
    const ScaleContext *scale = ctx->priv;
    AVFilterFormats *formats;
    const AVPixFmtDescriptor *desc;
    enum AVPixelFormat pix_fmt;
    int ret;

    desc    = NULL;
    formats = NULL;
    while ((desc = av_pix_fmt_desc_next(desc))) {
        pix_fmt = av_pix_fmt_desc_get_id(desc);
        if (sws_test_format(pix_fmt, 0)) {
            if ((ret = ff_add_format(&formats, pix_fmt)) < 0)
                return ret;
        }
    }
    if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0)
        return ret;

    desc    = NULL;
    formats = NULL;
    while ((desc = av_pix_fmt_desc_next(desc))) {
        pix_fmt = av_pix_fmt_desc_get_id(desc);
        if (sws_test_format(pix_fmt, 1) || pix_fmt == AV_PIX_FMT_PAL8) {
            if ((ret = ff_add_format(&formats, pix_fmt)) < 0)
                return ret;
        }
    }
    if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
        return ret;

    /* accept all supported inputs, even if user overrides their properties */
    formats = ff_all_color_spaces();
    for (int i = 0; i < formats->nb_formats; i++) {
        if (!sws_test_colorspace(formats->formats[i], 0)) {
            for (int j = i--; j + 1 < formats->nb_formats; j++)
                formats->formats[j] = formats->formats[j + 1];
            formats->nb_formats--;
        }
    }
    if ((ret = ff_formats_ref(formats, &cfg_in[0]->color_spaces)) < 0)
        return ret;

    if ((ret = ff_formats_ref(ff_all_color_ranges(),
                              &cfg_in[0]->color_ranges)) < 0)
        return ret;

    /* propagate output properties if overridden */
    if (scale->out_color_matrix != AVCOL_SPC_UNSPECIFIED) {
        formats = ff_make_formats_list_singleton(scale->out_color_matrix);
    } else {
        formats = ff_all_color_spaces();
        for (int i = 0; i < formats->nb_formats; i++) {
            if (!sws_test_colorspace(formats->formats[i], 1)) {
                for (int j = i--; j + 1 < formats->nb_formats; j++)
                    formats->formats[j] = formats->formats[j + 1];
                formats->nb_formats--;
            }
        }
    }
    if ((ret = ff_formats_ref(formats, &cfg_out[0]->color_spaces)) < 0)
        return ret;

    formats = scale->out_range != AVCOL_RANGE_UNSPECIFIED
                ? ff_make_formats_list_singleton(scale->out_range)
                : ff_all_color_ranges();
    if ((ret = ff_formats_ref(formats, &cfg_out[0]->color_ranges)) < 0)
        return ret;

    if (scale->sws->alpha_blend) {
        if ((ret = ff_formats_ref(ff_make_formats_list_singleton(AVALPHA_MODE_STRAIGHT),
                                  &cfg_in[0]->alpha_modes)) < 0)
            return ret;
    }

    return 0;
}

static int scale_eval_dimensions(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;
    const char scale2ref = IS_SCALE2REF(ctx);
    const AVFilterLink *inlink = scale2ref ? ctx->inputs[1] : ctx->inputs[0];
    const AVFilterLink *outlink = ctx->outputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
    char *expr;
    int eval_w, eval_h;
    int ret;
    double res;
    const AVPixFmtDescriptor *main_desc;
    const AVFilterLink *main_link;

    if (scale2ref) {
        main_link = ctx->inputs[0];
        main_desc = av_pix_fmt_desc_get(main_link->format);
    }

    scale->var_values[VAR_IN_W]  = scale->var_values[VAR_IW] = inlink->w;
    scale->var_values[VAR_IN_H]  = scale->var_values[VAR_IH] = inlink->h;
    scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = NAN;
    scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = NAN;
    scale->var_values[VAR_A]     = (double) inlink->w / inlink->h;
    scale->var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
        (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
    scale->var_values[VAR_DAR]   = scale->var_values[VAR_A] * scale->var_values[VAR_SAR];
    scale->var_values[VAR_HSUB]  = 1 << desc->log2_chroma_w;
    scale->var_values[VAR_VSUB]  = 1 << desc->log2_chroma_h;
    scale->var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
    scale->var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;

    if (scale2ref) {
        scale->var_values[VAR_S2R_MAIN_W] = main_link->w;
        scale->var_values[VAR_S2R_MAIN_H] = main_link->h;
        scale->var_values[VAR_S2R_MAIN_A] = (double) main_link->w / main_link->h;
        scale->var_values[VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ?
            (double) main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1;
        scale->var_values[VAR_S2R_MAIN_DAR] = scale->var_values[VAR_S2R_MDAR] =
            scale->var_values[VAR_S2R_MAIN_A] * scale->var_values[VAR_S2R_MAIN_SAR];
        scale->var_values[VAR_S2R_MAIN_HSUB] = 1 << main_desc->log2_chroma_w;
        scale->var_values[VAR_S2R_MAIN_VSUB] = 1 << main_desc->log2_chroma_h;
    }

    if (scale->uses_ref) {
        const AVFilterLink *reflink = ctx->inputs[1];
        const AVPixFmtDescriptor *ref_desc = av_pix_fmt_desc_get(reflink->format);
        scale->var_values[VAR_REF_W] = scale->var_values[VAR_RW] = reflink->w;
        scale->var_values[VAR_REF_H] = scale->var_values[VAR_RH] = reflink->h;
        scale->var_values[VAR_REF_A] = (double) reflink->w / reflink->h;
        scale->var_values[VAR_REF_SAR] = reflink->sample_aspect_ratio.num ?
            (double) reflink->sample_aspect_ratio.num / reflink->sample_aspect_ratio.den : 1;
        scale->var_values[VAR_REF_DAR] = scale->var_values[VAR_RDAR] =
            scale->var_values[VAR_REF_A] * scale->var_values[VAR_REF_SAR];
        scale->var_values[VAR_REF_HSUB] = 1 << ref_desc->log2_chroma_w;
        scale->var_values[VAR_REF_VSUB] = 1 << ref_desc->log2_chroma_h;
    }

    res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
    eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;

    res = av_expr_eval(scale->h_pexpr, scale->var_values, NULL);
    if (isnan(res)) {
        expr = scale->h_expr;
        ret = AVERROR(EINVAL);
        goto fail;
    }
    eval_h = scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = (int) res == 0 ? inlink->h : (int) res;

    res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
    if (isnan(res)) {
        expr = scale->w_expr;
        ret = AVERROR(EINVAL);
        goto fail;
    }
    eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;

    scale->w = eval_w;
    scale->h = eval_h;

    return 0;

fail:
    av_log(ctx, AV_LOG_ERROR,
           "Error when evaluating the expression '%s'.\n", expr);
    return ret;
}

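/*
 * Illustrative note (editorial addition, not part of the original source):
 * the evaluation above runs width, then height, then width once more. The
 * first width pass seeds 'ow' so the height expression may reference it; the
 * second width pass runs after 'oh' is known, so the width expression may
 * reference the height instead. For example, assuming the documented
 * expression syntax,
 *
 *   scale=w=trunc(oh*a/2)*2:h=720
 *
 * resolves the height to 720 on the second step and the width to an even
 * value near 720*a on the third. check_exprs() above only warns when both
 * expressions reference each other, since such a pair may not converge.
 */
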
static int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink0 = outlink->src->inputs[0];
    AVFilterLink *inlink  = IS_SCALE2REF(ctx) ?
                            outlink->src->inputs[1] :
                            outlink->src->inputs[0];
    ScaleContext *scale = ctx->priv;
    uint8_t *flags_val = NULL;
    double w_adj = 1.0;
    int ret;

    if ((ret = scale_eval_dimensions(ctx)) < 0)
        goto fail;

    outlink->w = scale->w;
    outlink->h = scale->h;

    if (scale->reset_sar)
        w_adj = IS_SCALE2REF(ctx) ? scale->var_values[VAR_S2R_MAIN_SAR] :
                                    scale->var_values[VAR_SAR];

    ret = ff_scale_adjust_dimensions(inlink, &outlink->w, &outlink->h,
                                     scale->force_original_aspect_ratio,
                                     scale->force_divisible_by, w_adj);
    if (ret < 0)
        goto fail;

    if (outlink->w > INT_MAX ||
        outlink->h > INT_MAX ||
        (outlink->h * inlink->w) > INT_MAX ||
        (outlink->w * inlink->h) > INT_MAX)
        av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");

    /* TODO: make algorithm configurable */

    if (scale->reset_sar)
        outlink->sample_aspect_ratio = (AVRational){1, 1};
    else if (inlink0->sample_aspect_ratio.num) {
        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink0->w, outlink->w * inlink0->h}, inlink0->sample_aspect_ratio);
    } else
        outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;

    av_opt_get(scale->sws, "sws_flags", 0, &flags_val);
    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s csp:%s range:%s sar:%d/%d -> w:%d h:%d fmt:%s csp:%s range:%s sar:%d/%d flags:%s\n",
           inlink->w, inlink->h, av_get_pix_fmt_name(inlink->format),
           av_color_space_name(inlink->colorspace), av_color_range_name(inlink->color_range),
           inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
           outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
           av_color_space_name(outlink->colorspace), av_color_range_name(outlink->color_range),
           outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
           flags_val);
    av_freep(&flags_val);

    if (inlink->w != outlink->w || inlink->h != outlink->h) {
        av_frame_side_data_remove_by_props(&outlink->side_data, &outlink->nb_side_data,
                                           AV_SIDE_DATA_PROP_SIZE_DEPENDENT);
    }

    if (scale->in_primaries != scale->out_primaries || scale->in_transfer != scale->out_transfer) {
        av_frame_side_data_remove_by_props(&outlink->side_data, &outlink->nb_side_data,
                                           AV_SIDE_DATA_PROP_COLOR_DEPENDENT);
    }

    if (!IS_SCALE2REF(ctx)) {
        ff_framesync_uninit(&scale->fs);
        ret = ff_framesync_init(&scale->fs, ctx, ctx->nb_inputs);
        if (ret < 0)
            return ret;
        scale->fs.on_event = do_scale;
        scale->fs.in[0].time_base = ctx->inputs[0]->time_base;
        scale->fs.in[0].sync   = 1;
        scale->fs.in[0].before = EXT_STOP;
        scale->fs.in[0].after  = EXT_STOP;
        if (scale->uses_ref) {
            av_assert0(ctx->nb_inputs == 2);
            scale->fs.in[1].time_base = ctx->inputs[1]->time_base;
            scale->fs.in[1].sync   = 0;
            scale->fs.in[1].before = EXT_NULL;
            scale->fs.in[1].after  = EXT_INFINITY;
        }
        ret = ff_framesync_configure(&scale->fs);
        if (ret < 0)
            return ret;
    }

    return 0;

fail:
    return ret;
}

static int config_props_ref(AVFilterLink *outlink)
{
    AVFilterLink *inlink = outlink->src->inputs[1];
    FilterLink *il = ff_filter_link(inlink);
    FilterLink *ol = ff_filter_link(outlink);

    outlink->w = inlink->w;
    outlink->h = inlink->h;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->time_base = inlink->time_base;
    ol->frame_rate = il->frame_rate;
    outlink->colorspace = inlink->colorspace;
    outlink->color_range = inlink->color_range;

    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    return ff_request_frame(outlink->src->inputs[0]);
}

static int request_frame_ref(AVFilterLink *outlink)
{
    return ff_request_frame(outlink->src->inputs[1]);
}

/* Takes over ownership of *frame_in, passes ownership of *frame_out to caller */
static int scale_frame(AVFilterLink *link, AVFrame **frame_in,
                       AVFrame **frame_out)
{
    FilterLink *inl = ff_filter_link(link);
    AVFilterContext *ctx = link->dst;
    ScaleContext *scale = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *in = *frame_in;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
    char buf[32];
    int ret, flags_orig, frame_changed;

    *frame_in = NULL;

    frame_changed = in->width  != link->w ||
                    in->height != link->h ||
                    in->format != link->format ||
                    in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
                    in->sample_aspect_ratio.num != link->sample_aspect_ratio.num ||
                    in->colorspace != link->colorspace ||
                    in->color_range != link->color_range;

    if (scale->eval_mode == EVAL_MODE_FRAME || frame_changed) {
        unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };

        av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
        av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);

        if (scale->eval_mode == EVAL_MODE_FRAME &&
            !frame_changed &&
            !IS_SCALE2REF(ctx) &&
            !(vars_w[VAR_N] || vars_w[VAR_T]) &&
            !(vars_h[VAR_N] || vars_h[VAR_T]) &&
            scale->w && scale->h)
            goto scale;

        if (scale->eval_mode == EVAL_MODE_INIT) {
            snprintf(buf, sizeof(buf) - 1, "%d", scale->w);
            av_opt_set(scale, "w", buf, 0);
            snprintf(buf, sizeof(buf) - 1, "%d", scale->h);
            av_opt_set(scale, "h", buf, 0);

            ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
            if (ret < 0)
                goto err;

            ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
            if (ret < 0)
                goto err;
        }

        if (IS_SCALE2REF(ctx)) {
            scale->var_values[VAR_S2R_MAIN_N] = inl->frame_count_out;
            scale->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base);
        } else {
            scale->var_values[VAR_N] = inl->frame_count_out;
            scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
        }

        link->dst->inputs[0]->format      = in->format;
        link->dst->inputs[0]->w           = in->width;
        link->dst->inputs[0]->h           = in->height;
        link->dst->inputs[0]->colorspace  = in->colorspace;
        link->dst->inputs[0]->color_range = in->color_range;

        link->dst->inputs[0]->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
        link->dst->inputs[0]->sample_aspect_ratio.num = in->sample_aspect_ratio.num;

        if ((ret = config_props(outlink)) < 0)
            goto err;
    }

scale:
    scale->hsub = desc->log2_chroma_w;
    scale->vsub = desc->log2_chroma_h;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        ret = AVERROR(ENOMEM);
        goto err;
    }

    if (scale->in_color_matrix != -1)
        in->colorspace = scale->in_color_matrix;
    if (scale->in_primaries != -1)
        in->color_primaries = scale->in_primaries;
    if (scale->in_transfer != -1)
        in->color_trc = scale->in_transfer;
    if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
        in->color_range = scale->in_range;
    in->chroma_location = scale->in_chroma_loc;

    flags_orig = in->flags;
    if (scale->interlaced > 0)
        in->flags |= AV_FRAME_FLAG_INTERLACED;
    else if (!scale->interlaced)
        in->flags &= ~AV_FRAME_FLAG_INTERLACED;

    av_frame_copy_props(out, in);
    out->width  = outlink->w;
    out->height = outlink->h;
    out->color_range = outlink->color_range;
    out->colorspace  = outlink->colorspace;
    out->alpha_mode  = outlink->alpha_mode;
    if (scale->out_chroma_loc != AVCHROMA_LOC_UNSPECIFIED)
        out->chroma_location = scale->out_chroma_loc;
    if (scale->out_primaries != -1)
        out->color_primaries = scale->out_primaries;
    if (scale->out_transfer != -1)
        out->color_trc = scale->out_transfer;

    if (out->width != in->width || out->height != in->height) {
        av_frame_side_data_remove_by_props(&out->side_data, &out->nb_side_data,
                                           AV_SIDE_DATA_PROP_SIZE_DEPENDENT);
    }

    if (in->color_primaries != out->color_primaries || in->color_trc != out->color_trc) {
        av_frame_side_data_remove_by_props(&out->side_data, &out->nb_side_data,
                                           AV_SIDE_DATA_PROP_COLOR_DEPENDENT);
    }

    if (scale->reset_sar) {
        out->sample_aspect_ratio = outlink->sample_aspect_ratio;
    } else {
        av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
                  (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
                  (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
                  INT_MAX);
    }

    if (sws_is_noop(out, in)) {
        av_frame_free(&out);
        in->flags = flags_orig;
        *frame_out = in;
        return 0;
    }

    if (out->format == AV_PIX_FMT_PAL8) {
        out->format = AV_PIX_FMT_BGR8;
        avpriv_set_systematic_pal2((uint32_t*) out->data[1], out->format);
    }

    ret = sws_scale_frame(scale->sws, out, in);
    av_frame_free(&in);
    out->flags = flags_orig;
    out->format = outlink->format; /* undo PAL8 handling */
    if (ret < 0)
        av_frame_free(&out);
    *frame_out = out;
    return ret;

err:
    av_frame_free(&in);
    return ret;
}

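/*
 * Note on the PAL8 path above (editorial comment, not from the original
 * source): the destination frame is temporarily relabelled as BGR8 and a
 * systematic palette is written into data[1] before sws_scale_frame() runs,
 * presumably because swscale does not render palettized output directly;
 * the format field is then restored so downstream filters still see the
 * negotiated PAL8 output format.
 */
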
static int do_scale(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    ScaleContext *scale = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *in = NULL, *ref = NULL;
    int ret = 0, frame_changed;

    ret = ff_framesync_get_frame(fs, 0, &in, 1);
    if (ret < 0)
        goto err;

    if (scale->uses_ref) {
        ret = ff_framesync_get_frame(fs, 1, &ref, 0);
        if (ret < 0)
            goto err;
    }

    if (ref) {
        AVFilterLink *reflink = ctx->inputs[1];
        FilterLink *rl = ff_filter_link(reflink);

        frame_changed = ref->width  != reflink->w ||
                        ref->height != reflink->h ||
                        ref->format != reflink->format ||
                        ref->sample_aspect_ratio.den != reflink->sample_aspect_ratio.den ||
                        ref->sample_aspect_ratio.num != reflink->sample_aspect_ratio.num ||
                        ref->colorspace != reflink->colorspace ||
                        ref->color_range != reflink->color_range;

        if (frame_changed) {
            reflink->format = ref->format;
            reflink->w = ref->width;
            reflink->h = ref->height;
            reflink->sample_aspect_ratio.num = ref->sample_aspect_ratio.num;
            reflink->sample_aspect_ratio.den = ref->sample_aspect_ratio.den;
            reflink->colorspace = ref->colorspace;
            reflink->color_range = ref->color_range;

            ret = config_props(outlink);
            if (ret < 0)
                goto err;
        }

        if (scale->eval_mode == EVAL_MODE_FRAME) {
            scale->var_values[VAR_REF_N] = rl->frame_count_out;
            scale->var_values[VAR_REF_T] = TS2T(ref->pts, reflink->time_base);
        }
    }

    ret = scale_frame(ctx->inputs[0], &in, &out);
    if (ret < 0)
        goto err;

    av_assert0(out);
    out->pts = av_rescale_q_rnd(fs->pts, fs->time_base, outlink->time_base, AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);

    return ff_filter_frame(outlink, out);

err:
    av_frame_free(&in);
    return ret;
}

static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext *ctx = link->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int ret;

    ret = scale_frame(link, &in, &out);
    if (out)
        return ff_filter_frame(outlink, out);

    return ret;
}

static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
{
    FilterLink *l = ff_filter_link(link);
    ScaleContext *scale = link->dst->priv;
    AVFilterLink *outlink = link->dst->outputs[1];
    int frame_changed;

    frame_changed = in->width  != link->w ||
                    in->height != link->h ||
                    in->format != link->format ||
                    in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
                    in->sample_aspect_ratio.num != link->sample_aspect_ratio.num ||
                    in->colorspace != link->colorspace ||
                    in->color_range != link->color_range;

    if (frame_changed) {
        link->format = in->format;
        link->w = in->width;
        link->h = in->height;
        link->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
        link->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
        link->colorspace = in->colorspace;
        link->color_range = in->color_range;

        config_props_ref(outlink);
    }

    if (scale->eval_mode == EVAL_MODE_FRAME) {
        scale->var_values[VAR_N] = l->frame_count_out;
        scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
    }

    return ff_filter_frame(outlink, in);
}

static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    ScaleContext *scale = ctx->priv;
    char *str_expr;
    AVExpr **pexpr_ptr;
    int ret, w, h;

    w = !strcmp(cmd, "width")  || !strcmp(cmd, "w");
    h = !strcmp(cmd, "height") || !strcmp(cmd, "h");

    if (w || h) {
        str_expr = w ? scale->w_expr : scale->h_expr;
        pexpr_ptr = w ? &scale->w_pexpr : &scale->h_pexpr;

        ret = scale_parse_expr(ctx, str_expr, pexpr_ptr, cmd, args);
    } else
        ret = AVERROR(ENOSYS);

    if (ret < 0)
        av_log(ctx, AV_LOG_ERROR, "Failed to process command. Continuing with existing parameters.\n");

    return ret;
}

static int activate(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;
    return ff_framesync_activate(&scale->fs);
}

static const AVClass *child_class_iterate(void **iter)
{
    switch ((uintptr_t) *iter) {
    case 0:
        *iter = (void*)(uintptr_t) 1;
        return sws_get_class();
    case 1:
        *iter = (void*)(uintptr_t) 2;
        return &ff_framesync_class;
    }

    return NULL;
}

static void *child_next(void *obj, void *prev)
{
    ScaleContext *s = obj;
    if (!prev)
        return s->sws;
    if (prev == s->sws)
        return &s->fs;
    return NULL;
}

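/*
 * Editorial note (not from the original source): child_class_iterate() and
 * child_next() expose the embedded SwsContext and FFFrameSync as AVOption
 * children of ScaleContext, so generic AVOption traversal (option listing,
 * or lookups using AV_OPT_SEARCH_CHILDREN) can reach swscale and framesync
 * options through the filter's private context in addition to the
 * scale_options declared below.
 */
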
#define OFFSET(x) offsetof(ScaleContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define TFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption scale_options[] = {
    { "w",      "Output video width",  OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "width",  "Output video width",  OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "h",      "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "height", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "flags",  "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "" }, .flags = FLAGS },
    { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_BOOL, { .i64 = 0 }, -1, 1, FLAGS },
    { "size",   "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, .flags = FLAGS },
    { "s",      "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, .flags = FLAGS },
    { "in_color_matrix",  "set input YCbCr type",  OFFSET(in_color_matrix),  AV_OPT_TYPE_INT, { .i64 = -1 }, -1, AVCOL_SPC_NB-1, .flags = FLAGS, .unit = "color" },
    { "out_color_matrix", "set output YCbCr type", OFFSET(out_color_matrix), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED }, 0, AVCOL_SPC_NB-1, .flags = FLAGS, .unit = "color" },
    { "auto",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = -1 },                   0, 0, FLAGS, .unit = "color" },
    { "bt601",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT470BG },    0, 0, FLAGS, .unit = "color" },
    { "bt470",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT470BG },    0, 0, FLAGS, .unit = "color" },
    { "smpte170m", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT470BG },    0, 0, FLAGS, .unit = "color" },
    { "bt470bg",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT470BG },    0, 0, FLAGS, .unit = "color" },
    { "bt709",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT709 },      0, 0, FLAGS, .unit = "color" },
    { "fcc",       NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_FCC },        0, 0, FLAGS, .unit = "color" },
    { "smpte240m", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_SMPTE240M },  0, 0, FLAGS, .unit = "color" },
    { "bt2020",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT2020_NCL }, 0, 0, FLAGS, .unit = "color" },
    { "bt2020nc",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT2020_NCL }, 0, 0, FLAGS, .unit = "color" },
    { "in_range",  "set input color range",  OFFSET(in_range),  AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, .unit = "range" },
    { "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, .unit = "range" },
    { "auto",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, .unit = "range" },
    { "unknown", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, .unit = "range" },
    { "full",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_JPEG },        0, 0, FLAGS, .unit = "range" },
    { "limited", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_MPEG },        0, 0, FLAGS, .unit = "range" },
    { "jpeg",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_JPEG },        0, 0, FLAGS, .unit = "range" },
    { "mpeg",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_MPEG },        0, 0, FLAGS, .unit = "range" },
    { "tv",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_MPEG },        0, 0, FLAGS, .unit = "range" },
    { "pc",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_JPEG },        0, 0, FLAGS, .unit = "range" },
    { "in_chroma_loc",  "set input chroma sample location",  OFFSET(in_chroma_loc),  AV_OPT_TYPE_INT, { .i64 = AVCHROMA_LOC_UNSPECIFIED }, 0, AVCHROMA_LOC_NB-1, .flags = FLAGS, .unit = "chroma_loc" },
    { "out_chroma_loc", "set output chroma sample location", OFFSET(out_chroma_loc), AV_OPT_TYPE_INT, { .i64 = AVCHROMA_LOC_UNSPECIFIED }, 0, AVCHROMA_LOC_NB-1, .flags = FLAGS, .unit = "chroma_loc" },
    { "auto",       NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCHROMA_LOC_UNSPECIFIED }, 0, 0, FLAGS, .unit = "chroma_loc" },
    { "unknown",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCHROMA_LOC_UNSPECIFIED }, 0, 0, FLAGS, .unit = "chroma_loc" },
    { "left",       NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCHROMA_LOC_LEFT },        0, 0, FLAGS, .unit = "chroma_loc" },
    { "center",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCHROMA_LOC_CENTER },      0, 0, FLAGS, .unit = "chroma_loc" },
    { "topleft",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCHROMA_LOC_TOPLEFT },     0, 0, FLAGS, .unit = "chroma_loc" },
    { "top",        NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCHROMA_LOC_TOP },         0, 0, FLAGS, .unit = "chroma_loc" },
    { "bottomleft", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCHROMA_LOC_BOTTOMLEFT },  0, 0, FLAGS, .unit = "chroma_loc" },
    { "bottom",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCHROMA_LOC_BOTTOM },      0, 0, FLAGS, .unit = "chroma_loc" },
    { "in_primaries",  "set input primaries",  OFFSET(in_primaries),  AV_OPT_TYPE_INT, { .i64 = -1 }, -1, AVCOL_PRI_NB-1, .flags = FLAGS, .unit = "primaries" },
    { "out_primaries", "set output primaries", OFFSET(out_primaries), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, AVCOL_PRI_NB-1, .flags = FLAGS, .unit = "primaries" },
    { "auto",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = -1 },                  0, 0, FLAGS, .unit = "primaries" },
    { "bt709",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_BT709 },     0, 0, FLAGS, .unit = "primaries" },
    { "bt470m",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_BT470M },    0, 0, FLAGS, .unit = "primaries" },
    { "bt470bg",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_BT470BG },   0, 0, FLAGS, .unit = "primaries" },
    { "smpte170m", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_SMPTE170M }, 0, 0, FLAGS, .unit = "primaries" },
    { "smpte240m", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_SMPTE240M }, 0, 0, FLAGS, .unit = "primaries" },
    { "film",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_FILM },      0, 0, FLAGS, .unit = "primaries" },
    { "bt2020",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_BT2020 },    0, 0, FLAGS, .unit = "primaries" },
    { "smpte428",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_SMPTE428 },  0, 0, FLAGS, .unit = "primaries" },
    { "smpte431",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_SMPTE431 },  0, 0, FLAGS, .unit = "primaries" },
    { "smpte432",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_SMPTE432 },  0, 0, FLAGS, .unit = "primaries" },
    { "jedec-p22", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_JEDEC_P22 }, 0, 0, FLAGS, .unit = "primaries" },
    { "ebu3213",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_EBU3213 },   0, 0, FLAGS, .unit = "primaries" },
  963. { "in_transfer", "set output color transfer", OFFSET(in_transfer), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, AVCOL_TRC_NB-1, .flags = FLAGS, .unit = "transfer"},
  964. {"out_transfer", "set output color transfer", OFFSET(out_transfer), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, AVCOL_TRC_NB-1, .flags = FLAGS, .unit = "transfer"},
  965. {"auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, FLAGS, .unit = "transfer"},
  966. {"bt709", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_BT709}, 0, 0, FLAGS, .unit = "transfer"},
  967. {"bt470m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_GAMMA22}, 0, 0, FLAGS, .unit = "transfer"},
  968. {"gamma22", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_GAMMA22}, 0, 0, FLAGS, .unit = "transfer"},
  969. {"bt470bg", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_GAMMA28}, 0, 0, FLAGS, .unit = "transfer"},
  970. {"gamma28", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_GAMMA28}, 0, 0, FLAGS, .unit = "transfer"},
  971. {"smpte170m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_SMPTE170M}, 0, 0, FLAGS, .unit = "transfer"},
  972. {"smpte240m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_SMPTE240M}, 0, 0, FLAGS, .unit = "transfer"},
  973. {"linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_LINEAR}, 0, 0, FLAGS, .unit = "transfer"},
  974. {"iec61966-2-1", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_IEC61966_2_1}, 0, 0, FLAGS, .unit = "transfer"},
  975. {"srgb", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_IEC61966_2_1}, 0, 0, FLAGS, .unit = "transfer"},
  976. {"iec61966-2-4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_IEC61966_2_4}, 0, 0, FLAGS, .unit = "transfer"},
  977. {"xvycc", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_IEC61966_2_4}, 0, 0, FLAGS, .unit = "transfer"},
  978. {"bt1361e", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_BT1361_ECG}, 0, 0, FLAGS, .unit = "transfer"},
  979. {"bt2020-10", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_BT2020_10}, 0, 0, FLAGS, .unit = "transfer"},
  980. {"bt2020-12", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_BT2020_12}, 0, 0, FLAGS, .unit = "transfer"},
  981. {"smpte2084", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_SMPTE2084}, 0, 0, FLAGS, .unit = "transfer"},
  982. {"smpte428", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_SMPTE428}, 0, 0, FLAGS, .unit = "transfer"},
  983. {"arib-std-b67", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_ARIB_STD_B67}, 0, 0, FLAGS, .unit = "transfer"},
  984. { "in_v_chr_pos", "input vertical chroma position in luma grid/256" , OFFSET(in_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
  985. { "in_h_chr_pos", "input horizontal chroma position in luma grid/256", OFFSET(in_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
  986. { "out_v_chr_pos", "output vertical chroma position in luma grid/256" , OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
  987. { "out_h_chr_pos", "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
  988. { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, .unit = "force_oar" },
  989. { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, .unit = "force_oar" },
  990. { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, .unit = "force_oar" },
  991. { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, .unit = "force_oar" },
  992. { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1}, 1, 256, FLAGS },
  993. { "reset_sar", "reset SAR to 1 and scale to square pixels if scaling proportionally", OFFSET(reset_sar), AV_OPT_TYPE_BOOL, { .i64 = 0}, 0, 1, FLAGS },
  994. { "param0", "Scaler param 0", OFFSET(param[0]), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS },
  995. { "param1", "Scaler param 1", OFFSET(param[1]), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS },
  996. { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, .unit = "eval" },
  997. { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
  998. { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
  999. { NULL }
  1000. };
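/*
 * Example invocations (editorial addition, not part of the original source),
 * using the standard ffmpeg CLI and the options declared above:
 *
 *   ffmpeg -i in.mp4 -vf "scale=1280:720" out.mp4
 *   ffmpeg -i in.mp4 -vf "scale=w=iw/2:h=-2:flags=lanczos" out.mp4
 *   ffmpeg -i in.mp4 -vf "scale=size=hd1080:out_range=tv" out.mp4
 */
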
static const AVClass scale_class = {
    .class_name          = "scale",
    .item_name           = av_default_item_name,
    .option              = scale_options,
    .version             = LIBAVUTIL_VERSION_INT,
    .category            = AV_CLASS_CATEGORY_FILTER,
    .child_class_iterate = child_class_iterate,
    .child_next          = child_next,
};

static const AVFilterPad avfilter_vf_scale_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};

static const AVFilterPad avfilter_vf_scale_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
    },
};

const FFFilter ff_vf_scale = {
    .p.name          = "scale",
    .p.description   = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
    .p.priv_class    = &scale_class,
    .p.flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
    .preinit         = preinit,
    .init            = init,
    .uninit          = uninit,
    .priv_size       = sizeof(ScaleContext),
    FILTER_INPUTS(avfilter_vf_scale_inputs),
    FILTER_OUTPUTS(avfilter_vf_scale_outputs),
    FILTER_QUERY_FUNC2(query_formats),
    .activate        = activate,
    .process_command = process_command,
};

static const AVClass *scale2ref_child_class_iterate(void **iter)
{
    const AVClass *c = *iter ? NULL : sws_get_class();
    *iter = (void*)(uintptr_t)c;
    return c;
}

static void *scale2ref_child_next(void *obj, void *prev)
{
    ScaleContext *s = obj;
    if (!prev)
        return s->sws;
    return NULL;
}

static const AVClass scale2ref_class = {
    .class_name          = "scale(2ref)",
    .item_name           = av_default_item_name,
    .option              = scale_options,
    .version             = LIBAVUTIL_VERSION_INT,
    .category            = AV_CLASS_CATEGORY_FILTER,
    .child_class_iterate = scale2ref_child_class_iterate,
    .child_next          = scale2ref_child_next,
};

static const AVFilterPad avfilter_vf_scale2ref_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    {
        .name         = "ref",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_ref,
    },
};

static const AVFilterPad avfilter_vf_scale2ref_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_props,
        .request_frame = request_frame,
    },
    {
        .name          = "ref",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_props_ref,
        .request_frame = request_frame_ref,
    },
};

const FFFilter ff_vf_scale2ref = {
    .p.name          = "scale2ref",
    .p.description   = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."),
    .p.priv_class    = &scale2ref_class,
    .preinit         = preinit,
    .init            = init,
    .uninit          = uninit,
    .priv_size       = sizeof(ScaleContext),
    FILTER_INPUTS(avfilter_vf_scale2ref_inputs),
    FILTER_OUTPUTS(avfilter_vf_scale2ref_outputs),
    FILTER_QUERY_FUNC2(query_formats),
    .process_command = process_command,
};